diff --git a/.gitignore b/.gitignore
index 886f4432..7175c4f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,12 @@
 # Ignore all bazel-* symlinks.
 /bazel-*
+# Ignore Bazel verbose explanations
+--verbose_explanations
+# Ignore CMake usual build directory
+build
+# Ignore Vim files
+*.swp
+# Ignore QtCreator Project file
+CMakeLists.txt.user
+# Ignore VS Code files
+.vscode/*
diff --git a/CMake/AbseilConfigureCopts.cmake b/CMake/AbseilConfigureCopts.cmake
new file mode 100644
index 00000000..96e0390b
--- /dev/null
+++ b/CMake/AbseilConfigureCopts.cmake
@@ -0,0 +1,145 @@
+# Abseil-specific compiler flags. See absl/copts.bzl for description.
+# DO NOT CHANGE THIS FILE WITHOUT THE CORRESPONDING CHANGE TO absl/copts.bzl
+
+list(APPEND GCC_FLAGS
+  -Wall
+  -Wextra
+  -Wcast-qual
+  -Wconversion-null
+  -Wmissing-declarations
+  -Woverlength-strings
+  -Wpointer-arith
+  -Wunused-local-typedefs
+  -Wunused-result
+  -Wvarargs
+  -Wwrite-strings
+  -Wno-sign-compare
+)
+
+list(APPEND GCC_TEST_FLAGS
+  -Wno-conversion-null
+  -Wno-missing-declarations
+  -Wno-sign-compare
+  -Wno-unused-function
+  -Wno-unused-parameter
+  -Wno-unused-private-field
+)
+
+list(APPEND LLVM_FLAGS
+  -Wall
+  -Wextra
+  -Weverything
+  -Wno-c++98-compat-pedantic
+  -Wno-conversion
+  -Wno-covered-switch-default
+  -Wno-deprecated
+  -Wno-disabled-macro-expansion
+  -Wno-double-promotion
+  -Wno-comma
+  -Wno-extra-semi
+  -Wno-packed
+  -Wno-padded
+  -Wno-sign-compare
+  -Wno-float-conversion
+  -Wno-float-equal
+  -Wno-format-nonliteral
+  -Wno-gcc-compat
+  -Wno-global-constructors
+  -Wno-exit-time-destructors
+  -Wno-nested-anon-types
+  -Wno-non-modular-include-in-module
+  -Wno-old-style-cast
+  -Wno-range-loop-analysis
+  -Wno-reserved-id-macro
+  -Wno-shorten-64-to-32
+  -Wno-switch-enum
+  -Wno-thread-safety-negative
+  -Wno-undef
+  -Wno-unknown-warning-option
+  -Wno-unreachable-code
+  -Wno-unused-macros
+  -Wno-weak-vtables
+  -Wbitfield-enum-conversion
+  -Wbool-conversion
+  -Wconstant-conversion
+  -Wenum-conversion
+  -Wint-conversion
+  -Wliteral-conversion
+  -Wnon-literal-null-conversion
+  -Wnull-conversion
+  -Wobjc-literal-conversion
+  -Wno-sign-conversion
+  -Wstring-conversion
+)
+
+list(APPEND LLVM_TEST_FLAGS
+  -Wno-c99-extensions
+  -Wno-missing-noreturn
+  -Wno-missing-prototypes
+  -Wno-missing-variable-declarations
+  -Wno-null-conversion
+  -Wno-shadow
+  -Wno-shift-sign-overflow
+  -Wno-sign-compare
+  -Wno-unused-function
+  -Wno-unused-member-function
+  -Wno-unused-parameter
+  -Wno-unused-private-field
+  -Wno-unused-template
+  -Wno-used-but-marked-unused
+  -Wno-zero-as-null-pointer-constant
+  -Wno-gnu-zero-variadic-macro-arguments
+)
+
+list(APPEND MSVC_FLAGS
+  /W3
+  /wd4005
+  /wd4018
+  /wd4068
+  /wd4180
+  /wd4244
+  /wd4267
+  /wd4800
+  /DNOMINMAX
+  /DWIN32_LEAN_AND_MEAN
+  /D_CRT_SECURE_NO_WARNINGS
+  /D_SCL_SECURE_NO_WARNINGS
+  /D_ENABLE_EXTENDED_ALIGNED_STORAGE
+)
+
+list(APPEND MSVC_TEST_FLAGS
+  /wd4101
+  /wd4503
+)
+
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+  set(ABSL_DEFAULT_COPTS "${GCC_FLAGS}")
+  set(ABSL_TEST_COPTS "${GCC_FLAGS};${GCC_TEST_FLAGS}")
+  set(ABSL_EXCEPTIONS_FLAG "-fexceptions")
+elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+  # MATCHES so we get both Clang and AppleClang
+  set(ABSL_DEFAULT_COPTS "${LLVM_FLAGS}")
+  set(ABSL_TEST_COPTS "${LLVM_FLAGS};${LLVM_TEST_FLAGS}")
+  set(ABSL_EXCEPTIONS_FLAG "-fexceptions")
+elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+  set(ABSL_DEFAULT_COPTS "${MSVC_FLAGS}")
+  set(ABSL_TEST_COPTS "${MSVC_FLAGS};${MSVC_TEST_FLAGS}")
+  set(ABSL_EXCEPTIONS_FLAG "/U_HAS_EXCEPTIONS;/D_HAS_EXCEPTIONS=1;/EHsc")
+else()
+  message(WARNING "Unknown compiler: ${CMAKE_CXX_COMPILER}. Building with no default flags")
+  set(ABSL_DEFAULT_COPTS "")
+  set(ABSL_TEST_COPTS "")
+  set(ABSL_EXCEPTIONS_FLAG "")
+endif()
+
+# This flag is used internally for Bazel builds and is kept here for consistency
+set(ABSL_EXCEPTIONS_FLAG_LINKOPTS "")
+
+if("${CMAKE_CXX_STANDARD}" EQUAL 98)
+  message(FATAL_ERROR "Abseil requires at least C++11")
+elseif(NOT "${CMAKE_CXX_STANDARD}")
+  message(STATUS "No CMAKE_CXX_STANDARD set, assuming 11")
+  set(ABSL_CXX_STANDARD 11)
+else()
+  set(ABSL_CXX_STANDARD "${CMAKE_CXX_STANDARD}")
+endif()
diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake
index e4eafe49..5402bf51 100644
--- a/CMake/AbseilHelpers.cmake
+++ b/CMake/AbseilHelpers.cmake
@@ -15,6 +15,7 @@
 #
 
 include(CMakeParseArguments)
+include(AbseilConfigureCopts)
 
 # The IDE folder for Abseil that will be used if Abseil is included in a CMake
 # project that sets
@@ -48,7 +49,11 @@ function(absl_library)
 
   add_library(${_NAME} STATIC ${ABSL_LIB_SOURCES})
 
-  target_compile_options(${_NAME} PRIVATE ${ABSL_COMPILE_CXXFLAGS} ${ABSL_LIB_PRIVATE_COMPILE_FLAGS})
+  target_compile_options(${_NAME}
+    PRIVATE
+      ${ABSL_LIB_PRIVATE_COMPILE_FLAGS}
+      ${ABSL_DEFAULT_COPTS}
+  )
   target_link_libraries(${_NAME} PUBLIC ${ABSL_LIB_PUBLIC_LIBRARIES})
   target_include_directories(${_NAME}
     PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} ${ABSL_LIB_PUBLIC_INCLUDE_DIRS}
@@ -57,12 +62,199 @@ function(absl_library)
   # Add all Abseil targets to a folder in the IDE for organization.
   set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER})
 
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+
   if(ABSL_LIB_EXPORT_NAME)
     add_library(absl::${ABSL_LIB_EXPORT_NAME} ALIAS ${_NAME})
   endif()
 endfunction()
 
+# CMake function to imitate Bazel's cc_library rule.
+#
+# Parameters:
+# NAME: name of target (see Note)
+# HDRS: List of public header files for the library
+# SRCS: List of source files for the library
+# DEPS: List of other libraries to be linked into the binary targets
+# COPTS: List of private compile options
+# DEFINES: List of public defines
+# LINKOPTS: List of link options
+# PUBLIC: Add this so that this library will be exported under absl:: (see Note).
+#         Also, in IDEs the target will appear in the Abseil folder, while
+#         non-PUBLIC targets will appear in Abseil/internal.
+# TESTONLY: When added, this target will only be built if the user passes
+#           -DABSL_RUN_TESTS=ON to CMake.
+#
+# Note:
+# By default, absl_cc_library will always create a library named
+# absl_internal_${NAME}, and an alias target absl::${NAME}.
+# This is to reduce namespace pollution.
+#
+# absl_cc_library(
+#   NAME
+#     awesome
+#   HDRS
+#     "a.h"
+#   SRCS
+#     "a.cc"
+# )
+# absl_cc_library(
+#   NAME
+#     fantastic_lib
+#   SRCS
+#     "b.cc"
+#   DEPS
+#     absl_internal_awesome # not "awesome"!
+# )
+#
+# If PUBLIC is set, absl_cc_library will instead create a target named
+# absl_${NAME} and still an alias absl::${NAME}.
+#
+# absl_cc_library(
+#   NAME
+#     main_lib
+#   ...
+#   PUBLIC
+# )
+#
+# Users can then use the library as absl::main_lib (although absl_main_lib is
+# defined too).
+#
+# TODO: Implement "ALWAYSLINK"
+function(absl_cc_library)
+  cmake_parse_arguments(ABSL_CC_LIB
+    "DISABLE_INSTALL;PUBLIC;TESTONLY"
+    "NAME"
+    "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS"
+    ${ARGN}
+  )
+
+  if (NOT ABSL_CC_LIB_TESTONLY OR ABSL_RUN_TESTS)
+    if (ABSL_CC_LIB_PUBLIC)
+      set(_NAME "absl_${ABSL_CC_LIB_NAME}")
+    else()
+      set(_NAME "absl_internal_${ABSL_CC_LIB_NAME}")
+    endif()
+
+    # Check if this is a header-only library
+    if ("${ABSL_CC_LIB_SRCS}" STREQUAL "")
+      set(ABSL_CC_LIB_IS_INTERFACE 1)
+    else()
+      set(ABSL_CC_LIB_IS_INTERFACE 0)
+    endif()
+
+    if(NOT ABSL_CC_LIB_IS_INTERFACE)
+      add_library(${_NAME} STATIC "")
+      target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
+      target_include_directories(${_NAME}
+        PUBLIC ${ABSL_COMMON_INCLUDE_DIRS})
+      target_compile_options(${_NAME}
+        PRIVATE ${ABSL_CC_LIB_COPTS})
+      target_link_libraries(${_NAME}
+        PUBLIC ${ABSL_CC_LIB_DEPS}
+        PRIVATE ${ABSL_CC_LIB_LINKOPTS}
+      )
+      target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES})
+
+      # Add all Abseil targets to a folder in the IDE for organization.
+      if(ABSL_CC_LIB_PUBLIC)
+        set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER})
+      elseif(ABSL_CC_LIB_TESTONLY)
+        set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test)
+      else()
+        set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal)
+      endif()
+
+      # INTERFACE libraries can't have the CXX_STANDARD property set
+      set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+      set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+    else()
+      # Generating a header-only library
+      add_library(${_NAME} INTERFACE)
+      target_include_directories(${_NAME}
+        INTERFACE ${ABSL_COMMON_INCLUDE_DIRS})
+      target_link_libraries(${_NAME}
+        INTERFACE ${ABSL_CC_LIB_DEPS} ${ABSL_CC_LIB_LINKOPTS}
+      )
+      target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES})
+    endif()
+
+    add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME})
+  endif()
+endfunction()
+
+# absl_cc_test()
+#
+# CMake function to imitate Bazel's cc_test rule.
+#
+# Parameters:
+# NAME: name of target (see Usage below)
+# SRCS: List of source files for the binary
+# DEPS: List of other libraries to be linked into the binary targets
+# COPTS: List of private compile options
+# DEFINES: List of public defines
+# LINKOPTS: List of link options
+#
+# Note:
+# By default, absl_cc_test will always create a binary named absl_${NAME}.
+# This will also add it to the ctest list as absl_${NAME}.
+#
+# Usage:
+# absl_cc_library(
+#   NAME
+#     awesome
+#   HDRS
+#     "a.h"
+#   SRCS
+#     "a.cc"
+#   PUBLIC
+# )
+#
+# absl_cc_test(
+#   NAME
+#     awesome_test
+#   SRCS
+#     "awesome_test.cc"
+#   DEPS
+#     absl::awesome
+#     gmock
+#     gtest_main
+# )
+function(absl_cc_test)
+  if(NOT ABSL_RUN_TESTS)
+    return()
+  endif()
+
+  cmake_parse_arguments(ABSL_CC_TEST
+    ""
+    "NAME"
+    "SRCS;COPTS;DEFINES;LINKOPTS;DEPS"
+    ${ARGN}
+  )
+  set(_NAME "absl_${ABSL_CC_TEST_NAME}")
+  add_executable(${_NAME} "")
+  target_sources(${_NAME} PRIVATE ${ABSL_CC_TEST_SRCS})
+  target_include_directories(${_NAME}
+    PUBLIC ${ABSL_COMMON_INCLUDE_DIRS}
+    PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
+  )
+  target_compile_definitions(${_NAME}
+    PUBLIC ${ABSL_CC_TEST_DEFINES}
+  )
+  target_compile_options(${_NAME}
+    PRIVATE ${ABSL_CC_TEST_COPTS}
+  )
+  target_link_libraries(${_NAME}
+    PUBLIC ${ABSL_CC_TEST_DEPS}
+    PRIVATE ${ABSL_CC_TEST_LINKOPTS}
+  )
+  # Add all Abseil targets to a folder in the IDE for organization.
+  set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test)
+
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+
+  add_test(NAME ${_NAME} COMMAND ${_NAME})
+endfunction()
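Together, the two helpers mirror a Bazel `cc_library`/`cc_test` pair. The rewritten `absl/algorithm/CMakeLists.txt` later in this patch uses exactly this pattern; a trimmed excerpt for orientation (editorial note, content taken verbatim from the hunks below):

```cmake
absl_cc_library(
  NAME
    algorithm
  HDRS
    "algorithm.h"
  COPTS
    ${ABSL_DEFAULT_COPTS}
  PUBLIC
)

absl_cc_test(
  NAME
    algorithm_test
  SRCS
    "algorithm_test.cc"
  DEPS
    absl::algorithm
    gmock_main
)
```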
 
 #
 # header only virtual target creation
 #
@@ -103,13 +295,15 @@ function(absl_header_library)
 
   # Add all Abseil targets to a folder in the IDE for organization.
   set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER})
 
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+
   if(ABSL_HO_LIB_EXPORT_NAME)
     add_library(absl::${ABSL_HO_LIB_EXPORT_NAME} ALIAS ${_NAME})
   endif()
 
 endfunction()
 
-
 #
 # create an abseil unit_test and add it to the executed test list
 #
@@ -123,7 +317,7 @@ endfunction()
 #
 # all tests will be registered for execution with add_test()
 #
-# test compilation and execution is disabled when BUILD_TESTING=OFF
+# test compilation and execution is disabled when ABSL_RUN_TESTS=OFF
 #
 
 function(absl_test)
@@ -135,25 +329,32 @@ function(absl_test)
 
   )
 
-  if(BUILD_TESTING)
+  if(ABSL_RUN_TESTS)
 
-    set(_NAME ${ABSL_TEST_TARGET})
+    set(_NAME "absl_${ABSL_TEST_TARGET}")
     string(TOUPPER ${_NAME} _UPPER_NAME)
 
-    add_executable(${_NAME}_bin ${ABSL_TEST_SOURCES})
+    add_executable(${_NAME} ${ABSL_TEST_SOURCES})
 
-    target_compile_options(${_NAME}_bin PRIVATE ${ABSL_COMPILE_CXXFLAGS} ${ABSL_TEST_PRIVATE_COMPILE_FLAGS})
-    target_link_libraries(${_NAME}_bin PUBLIC ${ABSL_TEST_PUBLIC_LIBRARIES} ${ABSL_TEST_COMMON_LIBRARIES})
-    target_include_directories(${_NAME}_bin
+    target_compile_options(${_NAME}
+      PRIVATE
+        ${ABSL_TEST_PRIVATE_COMPILE_FLAGS}
+        ${ABSL_TEST_COPTS}
+    )
+    target_link_libraries(${_NAME} PUBLIC ${ABSL_TEST_PUBLIC_LIBRARIES} ${ABSL_TEST_COMMON_LIBRARIES})
+    target_include_directories(${_NAME}
       PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} ${ABSL_TEST_PUBLIC_INCLUDE_DIRS}
       PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
     )
 
     # Add all Abseil targets to a folder in the IDE for organization.
-    set_property(TARGET ${_NAME}_bin PROPERTY FOLDER ${ABSL_IDE_FOLDER})
+    set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER})
+
+    set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+    set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
 
-    add_test(${_NAME} ${_NAME}_bin)
-  endif(BUILD_TESTING)
+    add_test(NAME ${_NAME} COMMAND ${_NAME})
+  endif(ABSL_RUN_TESTS)
 
 endfunction()
diff --git a/CMake/DownloadGTest.cmake b/CMake/DownloadGTest.cmake
index 9d413215..3c682aef 100644
--- a/CMake/DownloadGTest.cmake
+++ b/CMake/DownloadGTest.cmake
@@ -4,7 +4,7 @@
 # Download the latest googletest from Github master
 configure_file(
   ${CMAKE_CURRENT_LIST_DIR}/CMakeLists.txt.in
-  googletest-download/CMakeLists.txt
+  ${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt
 )
 
 # Configure and build the downloaded googletest source
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 89a3386f..1eafa407 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17,9 +17,15 @@
 # We require 3.0 for modern, target-based CMake. We require 3.1 for the use of
 # CXX_STANDARD in our targets.
 cmake_minimum_required(VERSION 3.1)
+
+# Compiler id for Apple Clang is now AppleClang.
+if (POLICY CMP0025)
+  cmake_policy(SET CMP0025 NEW)
+endif()
+
 project(absl)
 
-list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/CMake)
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/CMake)
 
 include(GNUInstallDirs)
 include(AbseilHelpers)
 
@@ -32,8 +38,16 @@ if (MSVC)
   # /wd4244 conversion from 'type1' to 'type2'
   # /wd4267 conversion from 'size_t' to 'type2'
   # /wd4800 force value to bool 'true' or 'false' (performance warning)
-  add_compile_options(/W3 /WX /wd4005 /wd4068 /wd4244 /wd4267 /wd4800)
-  add_definitions(/DNOMINMAX /DWIN32_LEAN_AND_MEAN=1 /D_CRT_SECURE_NO_WARNINGS /D_SCL_SECURE_NO_WARNINGS)
+  add_compile_options(/W3 /wd4005 /wd4068 /wd4244 /wd4267 /wd4800)
+  # /D_ENABLE_EXTENDED_ALIGNED_STORAGE was introduced in VS 2017 15.8; before
+  # that, the member `type` would non-conformingly have an alignment of only
+  # alignof(max_align_t).
+  add_definitions(
+    /DNOMINMAX
+    /DWIN32_LEAN_AND_MEAN=1
+    /D_CRT_SECURE_NO_WARNINGS
+    /D_SCL_SECURE_NO_WARNINGS
+    /D_ENABLE_EXTENDED_ALIGNED_STORAGE
+  )
 else()
   set(ABSL_STD_CXX_FLAG "-std=c++11" CACHE STRING "c++ std flag (default: c++11)")
 endif()
@@ -60,6 +74,12 @@ set(CMAKE_CXX_FLAGS "${ABSL_STD_CXX_FLAG} ${CMAKE_CXX_FLAGS}")
 
 # -fexceptions
 set(ABSL_EXCEPTIONS_FLAG "${CMAKE_CXX_EXCEPTIONS}")
 
+if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+  set(ABSL_USING_CLANG ON)
+else()
+  set(ABSL_USING_CLANG OFF)
+endif()
+
 # find dependencies
 ## pthread
 find_package(Threads REQUIRED)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 40351ddc..f4cb4a29 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -18,6 +18,53 @@
 You generally only need to submit a CLA once, so if you've already submitted
 one (even if it was for a different project), you probably don't need to do it
 again.
 
+## Contribution Guidelines
+
+Potential contributors sometimes ask us if the Abseil project is the appropriate
+home for their utility library code or for specific functions implementing
+missing portions of the standard. Often, the answer to this question is "no".
+We’d like to articulate our thinking on this issue so that our choices can be
+understood by everyone and so that contributors can have a better intuition
+about whether Abseil might be interested in adopting a new library.
+
+### Priorities
+
+Although our mission is to augment the C++ standard library, our goal is not to
+provide a full forward-compatible implementation of the latest standard. For us
+to consider a library for inclusion in Abseil, it is not enough that a library
+is useful. We generally choose to release a library when it meets at least one
+of the following criteria:
+
+* **Widespread usage** - Using our internal codebase to help gauge usage, most
+  of the libraries we've released have tens of thousands of users.
+* **Anticipated widespread usage** - Pre-adoption of some standard-compliant
+  APIs may not have broad adoption initially but can be expected to pick up
+  usage when it replaces legacy APIs. `absl::from_chars`, for example,
+  replaces existing code that converts strings to numbers and will therefore
+  likely see usage growth.
+* **High impact** - APIs that provide a key solution to a specific problem,
+  such as `absl::FixedArray`, have higher impact than usage numbers may signal
+  and are released because of their importance.
+* **Direct support for a library that falls under one of the above** - When we
+  want access to a smaller library as an implementation detail for a
+  higher-priority library we plan to release, we may release it, as we did
+  with portions of `absl/meta/type_traits.h`.
+  One consequence of this is that the presence of a library in Abseil does not
+  necessarily mean that other similar libraries would be a high priority.
+
+### API Freeze Consequences
+
+Via the
+[Abseil Compatibility Guidelines](https://abseil.io/about/compatibility), we
+have promised a large degree of API stability. In particular, we will not make
+backward-incompatible changes to released APIs without also shipping a tool or
+process that can upgrade our users' code. We are not yet at the point of easily
+releasing such tools. Therefore, at this time, shipping a library establishes an
+API contract which is borderline unchangeable. (We can add new functionality,
+but we cannot easily change existing behavior.) This constraint forces us to
+very carefully review all APIs that we ship.
+
+
 ## Coding Style
 
 To keep the source consistent, readable, diffable and easy to merge, we use a
diff --git a/README.md b/README.md
index 8eed5751..e9362be2 100644
--- a/README.md
+++ b/README.md
@@ -63,10 +63,14 @@ Abseil contains the following C++ library components:
 * [`algorithm`](absl/algorithm/)
   <br /> The `algorithm` library contains additions to the C++ `<algorithm>`
   library and container-based versions of such algorithms.
 * [`container`](absl/container/)
-  <br /> The `container` library contains additional STL-style containers.
+  <br /> The `container` library contains additional STL-style containers,
+  including Abseil's unordered "Swiss table" containers.
 * [`debugging`](absl/debugging/)
   <br /> The `debugging` library contains code useful for enabling leak
-  checks. Future updates will add stacktrace and symbolization utilities.
+  checks, and stacktrace and symbolization utilities.
+* [`hash`](absl/hash/)
+  <br /> The `hash` library contains the hashing framework and default hash
+  functor implementations for hashable types in Abseil.
 * [`memory`](absl/memory/)
   <br /> The `memory` library contains C++11-compatible versions of
   `std::make_unique()` and related memory management facilities.
@@ -90,6 +94,8 @@ Abseil contains the following C++ library components:
 * [`types`](absl/types/)
   <br /> The `types` library contains non-container utility types, like a
   C++11-compatible version of the C++17 `std::optional` type.
+* [`utility`](absl/utility/)
+  <br />
The `utility` library contains utility and helper code. ## License diff --git a/WORKSPACE b/WORKSPACE index a7b1c139..72ef1398 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,21 +1,23 @@ workspace(name = "com_google_absl") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + # Bazel toolchains http_archive( name = "bazel_toolchains", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/287b64e0a211fb7c23b74695f8d5f5205b61f4eb.tar.gz", - "https://github.com/bazelbuild/bazel-toolchains/archive/287b64e0a211fb7c23b74695f8d5f5205b61f4eb.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/bc09b995c137df042bb80a395b73d7ce6f26afbe.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/archive/bc09b995c137df042bb80a395b73d7ce6f26afbe.tar.gz", ], - strip_prefix = "bazel-toolchains-287b64e0a211fb7c23b74695f8d5f5205b61f4eb", - sha256 = "aca8ac6afd7745027ee4a43032b51a725a61a75a30f02cc58681ee87e4dcdf4b", + strip_prefix = "bazel-toolchains-bc09b995c137df042bb80a395b73d7ce6f26afbe", + sha256 = "4329663fe6c523425ad4d3c989a8ac026b04e1acedeceb56aa4b190fa7f3973c", ) # GoogleTest/GoogleMock framework. Used by most unit-tests. http_archive( name = "com_google_googletest", - urls = ["https://github.com/google/googletest/archive/4e4df226fc197c0dda6e37f5c8c3845ca1e73a49.zip"], - strip_prefix = "googletest-4e4df226fc197c0dda6e37f5c8c3845ca1e73a49", - sha256 = "d4179caf54410968d1fff0b869e7d74803dd30209ee6645ccf1ca65ab6cf5e5a", + urls = ["https://github.com/google/googletest/archive/b4d4438df9479675a632b2f11125e57133822ece.zip"], # 2018-07-16 + strip_prefix = "googletest-b4d4438df9479675a632b2f11125e57133822ece", + sha256 = "5aaa5d566517cae711e2a3505ea9a6438be1b37fcaae0ebcb96ccba9aa56f23a", ) # Google benchmark. @@ -25,11 +27,3 @@ http_archive( strip_prefix = "benchmark-16703ff83c1ae6d53e5155df3bb3ab0bc96083be", sha256 = "59f918c8ccd4d74b6ac43484467b500f1d64b40cc1010daa055375b322a43ba3", ) - -# RE2 regular-expression framework. Used by some unit-tests. -http_archive( - name = "com_googlesource_code_re2", - urls = ["https://github.com/google/re2/archive/6cf8ccd82dbaab2668e9b13596c68183c9ecd13f.zip"], - strip_prefix = "re2-6cf8ccd82dbaab2668e9b13596c68183c9ecd13f", - sha256 = "279a852219dbfc504501775596089d30e9c0b29664ce4128b0ac4c841471a16a", -) diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel index 439addbf..edd0274c 100644 --- a/absl/BUILD.bazel +++ b/absl/BUILD.bazel @@ -18,11 +18,10 @@ package(default_visibility = ["//visibility:public"]) licenses(["notice"]) # Apache 2.0 -config_setting( +load(":compiler_config_setting.bzl", "create_llvm_config") + +create_llvm_config( name = "llvm_compiler", - values = { - "compiler": "llvm", - }, visibility = [":__subpackages__"], ) diff --git a/absl/CMakeLists.txt b/absl/CMakeLists.txt index 689f64e2..1d09b193 100644 --- a/absl/CMakeLists.txt +++ b/absl/CMakeLists.txt @@ -20,6 +20,7 @@ add_subdirectory(base) add_subdirectory(algorithm) add_subdirectory(container) add_subdirectory(debugging) +add_subdirectory(hash) add_subdirectory(memory) add_subdirectory(meta) add_subdirectory(numeric) diff --git a/absl/algorithm/CMakeLists.txt b/absl/algorithm/CMakeLists.txt index fdf45c55..87a165c0 100644 --- a/absl/algorithm/CMakeLists.txt +++ b/absl/algorithm/CMakeLists.txt @@ -14,50 +14,50 @@ # limitations under the License. 
# -list(APPEND ALGORITHM_PUBLIC_HEADERS - "algorithm.h" - "container.h" -) - - -# -## TESTS -# - -# test algorithm_test -list(APPEND ALGORITHM_TEST_SRC - "algorithm_test.cc" - ${ALGORITHM_PUBLIC_HEADERS} - ${ALGORITHM_INTERNAL_HEADERS} -) - -absl_header_library( - TARGET - absl_algorithm - EXPORT_NAME +absl_cc_library( + NAME algorithm + HDRS + "algorithm.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC ) -absl_test( - TARGET +absl_cc_test( + NAME algorithm_test - SOURCES - ${ALGORITHM_TEST_SRC} - PUBLIC_LIBRARIES + SRCS + "algorithm_test.cc" + DEPS absl::algorithm + gmock_main ) +absl_cc_library( + NAME + algorithm_container + HDRS + "container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::meta + PUBLIC +) - - -# test container_test -set(CONTAINER_TEST_SRC "container_test.cc") - -absl_test( - TARGET +absl_cc_test( + NAME container_test - SOURCES - ${CONTAINER_TEST_SRC} - PUBLIC_LIBRARIES - absl::algorithm + SRCS + "container_test.cc" + DEPS + absl::algorithm_container + absl::base + absl::core_headers + absl::memory + absl::span + gmock_main ) diff --git a/absl/algorithm/algorithm.h b/absl/algorithm/algorithm.h index 3a8f2724..1eef16cb 100644 --- a/absl/algorithm/algorithm.h +++ b/absl/algorithm/algorithm.h @@ -27,7 +27,7 @@ #include namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace algorithm_internal { @@ -146,7 +146,7 @@ ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator>()); } -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_ALGORITHM_ALGORITHM_H_ diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h index 8eb10d7a..b7718206 100644 --- a/absl/algorithm/container.h +++ b/absl/algorithm/container.h @@ -46,6 +46,8 @@ #include #include #include +#include +#include #include #include @@ -54,8 +56,7 @@ #include "absl/meta/type_traits.h" namespace absl { -inline namespace lts_2018_06_20 { - +inline namespace lts_2018_12_18 { namespace container_algorithm_internal { // NOTE: it is important to defer to ADL lookup for building with C++ modules, @@ -102,6 +103,17 @@ ContainerIter c_begin(C& c) { return begin(c); } template ContainerIter c_end(C& c) { return end(c); } +template +struct IsUnorderedContainer : std::false_type {}; + +template +struct IsUnorderedContainer< + std::unordered_map> : std::true_type {}; + +template +struct IsUnorderedContainer> + : std::true_type {}; + } // namespace container_algorithm_internal // PUBLIC API @@ -315,7 +327,7 @@ container_algorithm_internal::ContainerDifferenceType c_count_if( // c_mismatch() // -// Container-based version of the `std::mismatchf()` function to +// Container-based version of the `std::mismatch()` function to // return the first element where two ordered containers differ. template container_algorithm_internal::ContainerIterPairType @@ -495,7 +507,7 @@ BidirectionalIterator c_copy_backward(const C& src, // Container-based version of the `std::move()` function to move // a container's elements into an iterator. 
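// (Editorial sketch, not part of this patch: the change to `C&&` below lets
// the source container be an rvalue, which is exactly what the MoveWithRvalue
// test added to container_test.cc in this patch exercises.)
//
//   std::vector<std::unique_ptr<int>> src;
//   src.emplace_back(absl::make_unique<int>(1));
//   std::vector<std::unique_ptr<int>> dest;
//   absl::c_move(std::move(src), std::back_inserter(dest));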
template -OutputIterator c_move(C& src, OutputIterator dest) { +OutputIterator c_move(C&& src, OutputIterator dest) { return std::move(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } @@ -635,7 +647,7 @@ container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, // Note: `c_xx()` container versions for `remove()`, `remove_if()`, // and `unique()` are omitted, because it's not clear whether or not such -// functions should call erase their supplied sequences afterwards. Either +// functions should call erase on their supplied sequences afterwards. Either // behavior would be surprising for a different set of users. // @@ -1155,7 +1167,13 @@ bool c_includes(const C1& c1, const C2& c2, Compare&& comp) { // Container-based version of the `std::set_union()` function // to return an iterator containing the union of two containers; duplicate // values are not copied into the output. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), @@ -1165,7 +1183,13 @@ OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { // Overload of c_set_union() for performing a merge using a `comp` other than // `operator<`. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, Compare&& comp) { return std::set_union(container_algorithm_internal::c_begin(c1), @@ -1179,7 +1203,13 @@ OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, // // Container-based version of the `std::set_intersection()` function // to return an iterator containing the intersection of two containers. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) { return std::set_intersection(container_algorithm_internal::c_begin(c1), @@ -1190,7 +1220,13 @@ OutputIterator c_set_intersection(const C1& c1, const C2& c2, // Overload of c_set_intersection() for performing a merge using a `comp` other // than `operator<`. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output, Compare&& comp) { return std::set_intersection(container_algorithm_internal::c_begin(c1), @@ -1205,7 +1241,13 @@ OutputIterator c_set_intersection(const C1& c1, const C2& c2, // Container-based version of the `std::set_difference()` function // to return an iterator containing elements present in the first container but // not in the second. 
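// (Editorial sketch, not part of this patch: both inputs must be sorted, and
// the IsUnorderedContainer guards added in this patch reject unordered
// associative containers at compile time.)
//
//   std::vector<int> a = {1, 2, 3, 4};
//   std::vector<int> b = {2, 4};
//   std::vector<int> out;
//   absl::c_set_difference(a, b, std::back_inserter(out));  // out == {1, 3}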
-template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_difference(container_algorithm_internal::c_begin(c1), @@ -1216,7 +1258,13 @@ OutputIterator c_set_difference(const C1& c1, const C2& c2, // Overload of c_set_difference() for performing a merge using a `comp` other // than `operator<`. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output, Compare&& comp) { return std::set_difference(container_algorithm_internal::c_begin(c1), @@ -1231,7 +1279,13 @@ OutputIterator c_set_difference(const C1& c1, const C2& c2, // Container-based version of the `std::set_symmetric_difference()` // function to return an iterator containing elements present in either one // container or the other, but not both. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_symmetric_difference( @@ -1243,7 +1297,13 @@ OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, // Overload of c_set_symmetric_difference() for performing a merge using a // `comp` other than `operator<`. -template +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, Compare&& comp) { @@ -1638,7 +1698,7 @@ OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, output_first, std::forward(op)); } -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_ALGORITHM_CONTAINER_H_ diff --git a/absl/algorithm/container_test.cc b/absl/algorithm/container_test.cc index de66f146..1502b17f 100644 --- a/absl/algorithm/container_test.cc +++ b/absl/algorithm/container_test.cc @@ -636,6 +636,21 @@ TEST(MutatingTest, Move) { Pointee(5))); } +TEST(MutatingTest, MoveWithRvalue) { + auto MakeRValueSrc = [] { + std::vector> src; + src.emplace_back(absl::make_unique(1)); + src.emplace_back(absl::make_unique(2)); + src.emplace_back(absl::make_unique(3)); + return src; + }; + + std::vector> dest = MakeRValueSrc(); + absl::c_move(MakeRValueSrc(), std::back_inserter(dest)); + EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1), + Pointee(2), Pointee(3))); +} + TEST(MutatingTest, SwapRanges) { std::vector odds = {2, 4, 6}; std::vector evens = {1, 3, 5}; diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel index d117a4fe..4566c697 100644 --- a/absl/base/BUILD.bazel +++ b/absl/base/BUILD.bazel @@ -19,6 +19,7 @@ load( "ABSL_DEFAULT_COPTS", "ABSL_TEST_COPTS", "ABSL_EXCEPTIONS_FLAG", + "ABSL_EXCEPTIONS_FLAG_LINKOPTS", ) package(default_visibility = ["//visibility:public"]) @@ -29,6 +30,7 @@ cc_library( name = "spinlock_wait", srcs = [ "internal/spinlock_akaros.inc", + "internal/spinlock_linux.inc", "internal/spinlock_posix.inc", "internal/spinlock_wait.cc", "internal/spinlock_win32.inc", @@ -73,7 +75,6 @@ cc_library( copts = ABSL_DEFAULT_COPTS, deps = [ ":config", - 
":dynamic_annotations", ], ) @@ -106,6 +107,7 @@ cc_library( "internal/identity.h", "internal/inline_variable.h", "internal/invoke.h", + "internal/scheduling_mode.h", ], copts = ABSL_DEFAULT_COPTS, visibility = [ @@ -179,13 +181,13 @@ cc_library( srcs = ["internal/throw_delegate.cc"], hdrs = ["internal/throw_delegate.h"], copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":base", ":config", - ":core_headers", ], ) @@ -193,6 +195,7 @@ cc_test( name = "throw_delegate_test", srcs = ["throw_delegate_test.cc"], copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, deps = [ ":throw_delegate", "@com_google_googletest//:gtest_main", @@ -225,6 +228,7 @@ cc_library( srcs = ["internal/exception_safety_testing.cc"], hdrs = ["internal/exception_safety_testing.h"], copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, deps = [ ":base", ":config", @@ -232,7 +236,7 @@ cc_library( "//absl/memory", "//absl/meta:type_traits", "//absl/strings", - "//absl/types:optional", + "//absl/utility", "@com_google_googletest//:gtest", ], ) @@ -241,6 +245,7 @@ cc_test( name = "exception_safety_testing_test", srcs = ["exception_safety_testing_test.cc"], copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, deps = [ ":exception_safety_testing", "//absl/memory", @@ -299,6 +304,7 @@ cc_test( size = "medium", srcs = ["spinlock_test_common.cc"], copts = ABSL_TEST_COPTS, + tags = ["no_test_wasm"], deps = [ ":base", ":core_headers", @@ -308,6 +314,33 @@ cc_test( ], ) +cc_library( + name = "spinlock_benchmark_common", + testonly = 1, + srcs = ["internal/spinlock_benchmark.cc"], + copts = ABSL_DEFAULT_COPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base", + ":base_internal", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], + alwayslink = 1, +) + +cc_binary( + name = "spinlock_benchmark", + testonly = 1, + copts = ABSL_DEFAULT_COPTS, + visibility = ["//visibility:private"], + deps = [ + ":spinlock_benchmark_common", + ], +) + cc_library( name = "endian", hdrs = [ @@ -337,6 +370,9 @@ cc_test( name = "config_test", srcs = ["config_test.cc"], copts = ABSL_TEST_COPTS, + tags = [ + "no_test_wasm", + ], deps = [ ":config", "//absl/synchronization:thread_pool", @@ -348,6 +384,9 @@ cc_test( name = "call_once_test", srcs = ["call_once_test.cc"], copts = ABSL_TEST_COPTS, + tags = [ + "no_test_wasm", + ], deps = [ ":base", ":core_headers", @@ -362,6 +401,7 @@ cc_test( copts = ABSL_TEST_COPTS, deps = [ ":base", + "//absl/strings", "@com_google_googletest//:gtest_main", ], ) @@ -387,6 +427,7 @@ cc_test( "//absl:windows": [], "//conditions:default": ["-pthread"], }), + tags = ["no_test_ios_x86_64"], deps = [":malloc_internal"], ) @@ -399,6 +440,9 @@ cc_test( "//absl:windows": [], "//conditions:default": ["-pthread"], }), + tags = [ + "no_test_wasm", + ], deps = [ ":base", ":core_headers", @@ -419,3 +463,23 @@ cc_test( "@com_github_google_benchmark//:benchmark_main", ], ) + +cc_library( + name = "bits", + hdrs = ["internal/bits.h"], + visibility = [ + "//absl:__subpackages__", + ], + deps = [":core_headers"], +) + +cc_test( + name = "bits_test", + size = "small", + srcs = ["internal/bits_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":bits", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt index 303533e2..212dd083 
100644 --- a/absl/base/CMakeLists.txt +++ b/absl/base/CMakeLists.txt @@ -14,373 +14,401 @@ # limitations under the License. # -list(APPEND BASE_PUBLIC_HEADERS - "attributes.h" - "call_once.h" - "casts.h" - "config.h" - "dynamic_annotations.h" - "log_severity.h" - "macros.h" - "optimization.h" - "policy_checks.h" - "port.h" - "thread_annotations.h" +absl_cc_library( + NAME + spinlock_wait + HDRS + "internal/scheduling_mode.h" + "internal/spinlock_wait.h" + SRCS + "internal/spinlock_akaros.inc" + "internal/spinlock_linux.inc" + "internal/spinlock_posix.inc" + "internal/spinlock_wait.cc" + "internal/spinlock_win32.inc" + DEPS + absl::core_headers ) +absl_cc_library( + NAME + config + HDRS + "config.h" + "policy_checks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + dynamic_annotations + HDRS + "dynamic_annotations.h" + SRCS + "dynamic_annotations.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEFINES + "__CLANG_SUPPORT_DYN_ANNOTATION__" + PUBLIC +) -list(APPEND BASE_INTERNAL_HEADERS - "internal/atomic_hook.h" - "internal/cycleclock.h" - "internal/direct_mmap.h" - "internal/endian.h" - "internal/exception_testing.h" - "internal/exception_safety_testing.h" - "internal/hide_ptr.h" - "internal/identity.h" - "internal/invoke.h" - "internal/inline_variable.h" - "internal/low_level_alloc.h" - "internal/low_level_scheduling.h" - "internal/per_thread_tls.h" - "internal/pretty_function.h" - "internal/raw_logging.h" - "internal/scheduling_mode.h" - "internal/spinlock.h" - "internal/spinlock_wait.h" - "internal/sysinfo.h" - "internal/thread_identity.h" - "internal/throw_delegate.h" - "internal/tsan_mutex_interface.h" - "internal/unaligned_access.h" - "internal/unscaledcycleclock.h" +absl_cc_library( + NAME + core_headers + HDRS + "attributes.h" + "macros.h" + "optimization.h" + "port.h" + "thread_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC ) +absl_cc_library( + NAME + malloc_internal + HDRS + "internal/direct_mmap.h" + "internal/low_level_alloc.h" + SRCS + "internal/low_level_alloc.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::dynamic_annotations + absl::spinlock_wait +) -# absl_base main library -list(APPEND BASE_SRC - "internal/cycleclock.cc" - "internal/raw_logging.cc" - "internal/spinlock.cc" - "internal/sysinfo.cc" - "internal/thread_identity.cc" - "internal/unscaledcycleclock.cc" - "internal/low_level_alloc.cc" - ${BASE_PUBLIC_HEADERS} - ${BASE_INTERNAL_HEADERS} +absl_cc_library( + NAME + base_internal + HDRS + "internal/hide_ptr.h" + "internal/identity.h" + "internal/inline_variable.h" + "internal/invoke.h" + COPTS + ${ABSL_DEFAULT_COPTS} ) -absl_library( - TARGET - absl_base - SOURCES - ${BASE_SRC} - PUBLIC_LIBRARIES - absl_dynamic_annotations - absl_spinlock_wait - EXPORT_NAME +absl_cc_library( + NAME base + HDRS + "call_once.h" + "casts.h" + "internal/atomic_hook.h" + "internal/cycleclock.h" + "internal/low_level_scheduling.h" + "internal/per_thread_tls.h" + "internal/raw_logging.h" + "internal/spinlock.h" + "internal/sysinfo.h" + "internal/thread_identity.h" + "internal/tsan_mutex_interface.h" + "internal/unscaledcycleclock.h" + "log_severity.h" + SRCS + "internal/cycleclock.cc" + "internal/raw_logging.cc" + "internal/spinlock.cc" + "internal/sysinfo.cc" + "internal/thread_identity.cc" + "internal/unscaledcycleclock.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::spinlock_wait + PUBLIC ) -# throw 
delegate library -set(THROW_DELEGATE_SRC "internal/throw_delegate.cc") - -absl_library( - TARGET - absl_throw_delegate - SOURCES - ${THROW_DELEGATE_SRC} - PUBLIC_LIBRARIES - ${THROW_DELEGATE_PUBLIC_LIBRARIES} - PRIVATE_COMPILE_FLAGS - ${ABSL_EXCEPTIONS_FLAG} - EXPORT_NAME +absl_cc_library( + NAME throw_delegate + HDRS + "internal/throw_delegate.h" + SRCS + "internal/throw_delegate.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_EXCEPTIONS_FLAG} + DEPS + absl::base +) + +absl_cc_library( + NAME + exception_testing + HDRS + "internal/exception_testing.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + gtest + TESTONLY +) + +absl_cc_library( + NAME + pretty_function + HDRS + "internal/pretty_function.h" + COPTS + ${ABSL_DEFAULT_COPTS} ) -if(BUILD_TESTING) - # exception-safety testing library - set(EXCEPTION_SAFETY_TESTING_SRC +absl_cc_library( + NAME + exception_safety_testing + HDRS "internal/exception_safety_testing.h" + SRCS "internal/exception_safety_testing.cc" - ) - set(EXCEPTION_SAFETY_TESTING_PUBLIC_LIBRARIES - ${ABSL_TEST_COMMON_LIBRARIES} + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_EXCEPTIONS_FLAG} + DEPS absl::base + absl::config + absl::pretty_function absl::memory absl::meta absl::strings - absl::optional + absl::utility gtest - ) - -absl_library( - TARGET - absl_base_internal_exception_safety_testing - SOURCES - ${EXCEPTION_SAFETY_TESTING_SRC} - PUBLIC_LIBRARIES - ${EXCEPTION_SAFETY_TESTING_PUBLIC_LIBRARIES} - PRIVATE_COMPILE_FLAGS - ${ABSL_EXCEPTIONS_FLAG} -) -endif() - - -# dynamic_annotations library -set(DYNAMIC_ANNOTATIONS_SRC "dynamic_annotations.cc") - -absl_library( - TARGET - absl_dynamic_annotations - SOURCES - ${DYNAMIC_ANNOTATIONS_SRC} -) - - -# spinlock_wait library -set(SPINLOCK_WAIT_SRC "internal/spinlock_wait.cc") - -absl_library( - TARGET - absl_spinlock_wait - SOURCES - ${SPINLOCK_WAIT_SRC} -) - - -# malloc_internal library -list(APPEND MALLOC_INTERNAL_SRC - "internal/low_level_alloc.cc" + TESTONLY ) -absl_library( - TARGET - absl_malloc_internal - SOURCES - ${MALLOC_INTERNAL_SRC} - PUBLIC_LIBRARIES - absl_dynamic_annotations +absl_cc_test( + NAME + absl_exception_safety_testing_test + SRCS + "exception_safety_testing_test.cc" + COPTS + ${ABSL_EXCEPTIONS_FLAG} + LINKOPTS + ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} + DEPS + absl::exception_safety_testing + absl::memory + gtest_main ) - - -# -## TESTS -# - -# call once test -set(ATOMIC_HOOK_TEST_SRC "internal/atomic_hook_test.cc") -set(ATOMIC_HOOK_TEST_PUBLIC_LIBRARIES absl::base) - -absl_test( - TARGET +absl_cc_test( + NAME atomic_hook_test - SOURCES - ${ATOMIC_HOOK_TEST_SRC} - PUBLIC_LIBRARIES - ${ATOMIC_HOOK_TEST_PUBLIC_LIBRARIES} -) - - -# call once test -set(CALL_ONCE_TEST_SRC "call_once_test.cc") -set(CALL_ONCE_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization) - -absl_test( - TARGET - call_once_test - SOURCES - ${CALL_ONCE_TEST_SRC} - PUBLIC_LIBRARIES - ${CALL_ONCE_TEST_PUBLIC_LIBRARIES} + SRCS + "internal/atomic_hook_test.cc" + DEPS + absl::base + absl::core_headers + gtest_main ) - -# test bit_cast_test -set(BIT_CAST_TEST_SRC "bit_cast_test.cc") - -absl_test( - TARGET +absl_cc_test( + NAME bit_cast_test - SOURCES - ${BIT_CAST_TEST_SRC} + SRCS + "bit_cast_test.cc" + DEPS + absl::base + absl::core_headers + gtest_main ) - -# test absl_throw_delegate_test -set(THROW_DELEGATE_TEST_SRC "throw_delegate_test.cc") -set(THROW_DELEGATE_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate) - -absl_test( - TARGET +absl_cc_test( + NAME throw_delegate_test - SOURCES - ${THROW_DELEGATE_TEST_SRC} - PUBLIC_LIBRARIES - 
${THROW_DELEGATE_TEST_PUBLIC_LIBRARIES} -) - - -# test invoke_test -set(INVOKE_TEST_SRC "invoke_test.cc") -set(INVOKE_TEST_PUBLIC_LIBRARIES absl::strings) - -absl_test( - TARGET - invoke_test - SOURCES - ${INVOKE_TEST_SRC} - PUBLIC_LIBRARIES - ${INVOKE_TEST_PUBLIC_LIBRARIES} -) - - -# test inline_variable_test -list(APPEND INLINE_VARIABLE_TEST_SRC - "internal/inline_variable_testing.h" - "inline_variable_test.cc" - "inline_variable_test_a.cc" - "inline_variable_test_b.cc" + SRCS + "throw_delegate_test.cc" + DEPS + absl::base + absl_internal_throw_delegate + gtest_main ) -set(INLINE_VARIABLE_TEST_PUBLIC_LIBRARIES absl::base) - -absl_test( - TARGET +absl_cc_test( + NAME inline_variable_test - SOURCES - ${INLINE_VARIABLE_TEST_SRC} - PUBLIC_LIBRARIES - ${INLINE_VARIABLE_TEST_PUBLIC_LIBRARIES} + SRCS + "internal/inline_variable_testing.h" + "inline_variable_test.cc" + "inline_variable_test_a.cc" + "inline_variable_test_b.cc" + DEPS + absl::base_internal + gtest_main ) +absl_cc_test( + NAME + invoke_test + SRCS + "invoke_test.cc" + DEPS + absl::base_internal + absl::memory + absl::strings + gmock + gtest_main +) -# test spinlock_test_common -set(SPINLOCK_TEST_COMMON_SRC "spinlock_test_common.cc") -set(SPINLOCK_TEST_COMMON_PUBLIC_LIBRARIES absl::base absl::synchronization) - -absl_test( - TARGET +absl_cc_library( + NAME spinlock_test_common - SOURCES - ${SPINLOCK_TEST_COMMON_SRC} - PUBLIC_LIBRARIES - ${SPINLOCK_TEST_COMMON_PUBLIC_LIBRARIES} + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::spinlock_wait + absl::synchronization + gtest + TESTONLY ) - -# test spinlock_test -set(SPINLOCK_TEST_SRC "spinlock_test_common.cc") -set(SPINLOCK_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization) - -absl_test( - TARGET +# On bazel BUILD this target use "alwayslink = 1" which is not implemented here +absl_cc_test( + NAME spinlock_test - SOURCES - ${SPINLOCK_TEST_SRC} - PUBLIC_LIBRARIES - ${SPINLOCK_TEST_PUBLIC_LIBRARIES} + SRCS + "spinlock_test_common.cc" + DEPS + absl::base + absl::core_headers + absl::spinlock_wait + absl::synchronization + gtest_main ) +absl_cc_library( + NAME + endian + HDRS + "internal/endian.h" + "internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) -# test endian_test -set(ENDIAN_TEST_SRC "internal/endian_test.cc") - -absl_test( - TARGET +absl_cc_test( + NAME endian_test - SOURCES - ${ENDIAN_TEST_SRC} + SRCS + "internal/endian_test.cc" + DEPS + absl::base + absl::config + absl::endian + gtest_main ) - -# test config_test -set(CONFIG_TEST_SRC "config_test.cc") -set(CONFIG_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization) -absl_test( - TARGET +absl_cc_test( + NAME config_test - SOURCES - ${CONFIG_TEST_SRC} - PUBLIC_LIBRARIES - ${CONFIG_TEST_PUBLIC_LIBRARIES} + SRCS + "config_test.cc" + DEPS + absl::config + absl::synchronization + gtest_main ) +absl_cc_test( + NAME + call_once_test + SRCS + "call_once_test.cc" + DEPS + absl::base + absl::core_headers + absl::synchronization + gtest_main +) -# test raw_logging_test -set(RAW_LOGGING_TEST_SRC "raw_logging_test.cc") -set(RAW_LOGGING_TEST_PUBLIC_LIBRARIES absl::base) - -absl_test( - TARGET +absl_cc_test( + NAME raw_logging_test - SOURCES - ${RAW_LOGGING_TEST_SRC} - PUBLIC_LIBRARIES - ${RAW_LOGGING_TEST_PUBLIC_LIBRARIES} + SRCS + "raw_logging_test.cc" + DEPS + absl::base + absl::strings + gtest_main ) - -# test sysinfo_test -set(SYSINFO_TEST_SRC "internal/sysinfo_test.cc") -set(SYSINFO_TEST_PUBLIC_LIBRARIES 
absl::base absl::synchronization) - -absl_test( - TARGET +absl_cc_test( + NAME sysinfo_test - SOURCES - ${SYSINFO_TEST_SRC} - PUBLIC_LIBRARIES - ${SYSINFO_TEST_PUBLIC_LIBRARIES} + SRCS + "internal/sysinfo_test.cc" + DEPS + absl::base + absl::synchronization + gtest_main ) - -# test low_level_alloc_test -set(LOW_LEVEL_ALLOC_TEST_SRC "internal/low_level_alloc_test.cc") -set(LOW_LEVEL_ALLOC_TEST_PUBLIC_LIBRARIES absl::base) - -absl_test( - TARGET +absl_cc_test( + NAME low_level_alloc_test - SOURCES - ${LOW_LEVEL_ALLOC_TEST_SRC} - PUBLIC_LIBRARIES - ${LOW_LEVEL_ALLOC_TEST_PUBLIC_LIBRARIES} + SRCS + "internal/low_level_alloc_test.cc" + DEPS + absl::malloc_internal + Threads::Threads ) - -# test thread_identity_test -set(THREAD_IDENTITY_TEST_SRC "internal/thread_identity_test.cc") -set(THREAD_IDENTITY_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization) - -absl_test( - TARGET +absl_cc_test( + NAME thread_identity_test - SOURCES - ${THREAD_IDENTITY_TEST_SRC} - PUBLIC_LIBRARIES - ${THREAD_IDENTITY_TEST_PUBLIC_LIBRARIES} + SRCS + "internal/thread_identity_test.cc" + DEPS + absl::base + absl::core_headers + absl::synchronization + Threads::Threads + gtest_main ) -#test exceptions_safety_testing_test -set(EXCEPTION_SAFETY_TESTING_TEST_SRC "exception_safety_testing_test.cc") -set(EXCEPTION_SAFETY_TESTING_TEST_PUBLIC_LIBRARIES - absl::base - absl_base_internal_exception_safety_testing - absl::memory - absl::meta - absl::strings - absl::optional +absl_cc_library( + NAME + bits + HDRS + "internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers ) -absl_test( - TARGET - absl_exception_safety_testing_test - SOURCES - ${EXCEPTION_SAFETY_TESTING_TEST_SRC} - PUBLIC_LIBRARIES - ${EXCEPTION_SAFETY_TESTING_TEST_PUBLIC_LIBRARIES} - PRIVATE_COMPILE_FLAGS - ${ABSL_EXCEPTIONS_FLAG} +absl_cc_test( + NAME + bits_test + SRCS + "internal/bits_test.cc" + DEPS + absl::bits + gtest_main ) diff --git a/absl/base/attributes.h b/absl/base/attributes.h index b1883b6d..291ad89e 100644 --- a/absl/base/attributes.h +++ b/absl/base/attributes.h @@ -100,7 +100,7 @@ // ABSL_PRINTF_ATTRIBUTE // ABSL_SCANF_ATTRIBUTE // -// Tells the compiler to perform `printf` format std::string checking if the +// Tells the compiler to perform `printf` format string checking if the // compiler supports it; see the 'format' attribute in // . // @@ -155,7 +155,12 @@ // ABSL_ATTRIBUTE_WEAK // // Tags a function as weak for the purposes of compilation and linking. -#if ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__)) +// Weak attributes currently do not work properly in LLVM's Windows backend, +// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for futher information. +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_HAVE_ATTRIBUTE_WEAK 1 @@ -296,13 +301,13 @@ // ABSL_HAVE_ATTRIBUTE_SECTION // -// Indicates whether labeled sections are supported. Labeled sections are not -// supported on Darwin/iOS. +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. Labeled sections are not supported on Darwin/iOS. 
#ifdef ABSL_HAVE_ATTRIBUTE_SECTION #error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set #elif (ABSL_HAVE_ATTRIBUTE(section) || \ (defined(__GNUC__) && !defined(__clang__))) && \ - !defined(__APPLE__) + !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK #define ABSL_HAVE_ATTRIBUTE_SECTION 1 // ABSL_ATTRIBUTE_SECTION @@ -397,17 +402,28 @@ // ABSL_MUST_USE_RESULT // -// Tells the compiler to warn about unused return values for functions declared -// with this macro. The macro must appear as the very first part of a function -// declaration or definition: +// Tells the compiler to warn about unused results. // -// Example: +// When annotating a function, it must appear as the first part of the +// declaration or definition. The compiler will warn if the return value from +// such a function is unused: // // ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); +// AllocateSprocket(); // Triggers a warning. +// +// When annotating a class, it is equivalent to annotating every function which +// returns an instance. +// +// class ABSL_MUST_USE_RESULT Sprocket {}; +// Sprocket(); // Triggers a warning. +// +// Sprocket MakeSprocket(); +// MakeSprocket(); // Triggers a warning. +// +// Note that references and pointers are not instances: // -// This placement has the broadest compatibility with GCC, Clang, and MSVC, with -// both defs and decls, and with GCC-style attributes, MSVC declspec, C++11 -// and C++17 attributes. +// Sprocket* SprocketPointer(); +// SprocketPointer(); // Does *not* trigger a warning. // // ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result // warning. For that, warn_unused_result is used only for clang but not for gcc. @@ -494,14 +510,27 @@ #define ABSL_XRAY_LOG_ARGS(N) #endif +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + // ----------------------------------------------------------------------------- // Variable Attributes // ----------------------------------------------------------------------------- // ABSL_ATTRIBUTE_UNUSED // -// Prevents the compiler from complaining about or optimizing away variables -// that appear unused. +// Prevents the compiler from complaining about variables that appear unused. 
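// (Editorial sketch, not part of this patch; RegisterModule() is a
// hypothetical function called only for its side effect.)
//
//   ABSL_ATTRIBUTE_UNUSED static const bool registered = RegisterModule();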
#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) #undef ABSL_ATTRIBUTE_UNUSED #define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) diff --git a/absl/base/bit_cast_test.cc b/absl/base/bit_cast_test.cc index 71bb368f..5af036df 100644 --- a/absl/base/bit_cast_test.cc +++ b/absl/base/bit_cast_test.cc @@ -22,7 +22,7 @@ #include "absl/base/macros.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace { template @@ -105,5 +105,5 @@ TEST(BitCast, Double) { } } // namespace -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/call_once.h b/absl/base/call_once.h index 37b6608a..aea9197b 100644 --- a/absl/base/call_once.h +++ b/absl/base/call_once.h @@ -39,7 +39,7 @@ #include "absl/base/port.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { class once_flag; @@ -151,12 +151,8 @@ void CallOnceImpl(std::atomic* control, old_control != kOnceRunning && old_control != kOnceWaiter && old_control != kOnceDone) { - ABSL_RAW_LOG( - FATAL, - "Unexpected value for control word: %lx. Either the control word " - "has non-static storage duration (where GoogleOnceDynamic might " - "be appropriate), or there's been a memory corruption.", - static_cast(old_control)); // NOLINT + ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", + static_cast(old_control)); // NOLINT } } #endif // NDEBUG @@ -212,7 +208,7 @@ void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) { } } -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_CALL_ONCE_H_ diff --git a/absl/base/call_once_test.cc b/absl/base/call_once_test.cc index 43a71656..4d98a405 100644 --- a/absl/base/call_once_test.cc +++ b/absl/base/call_once_test.cc @@ -22,7 +22,7 @@ #include "absl/synchronization/mutex.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace { absl::once_flag once; @@ -100,5 +100,5 @@ TEST(CallOnceTest, ExecutionCount) { } } // namespace -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/casts.h b/absl/base/casts.h index cd07d8f6..bba623b4 100644 --- a/absl/base/casts.h +++ b/absl/base/casts.h @@ -25,12 +25,36 @@ #define ABSL_BASE_CASTS_H_ #include +#include #include #include "absl/base/internal/identity.h" +#include "absl/base/macros.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { + +namespace internal_casts { + +// NOTE: Not a fully compliant implementation of `std::is_trivially_copyable`. +// TODO(calabrese) Branch on implementations that directly provide +// `std::is_trivially_copyable`, create a more rigorous workaround, and publicly +// expose in meta/type_traits. +template +struct is_trivially_copyable + : std::integral_constant< + bool, std::is_destructible::value&& __has_trivial_destructor(T) && + __has_trivial_copy(T) && __has_trivial_assign(T)> {}; + +template +struct is_bitcastable + : std::integral_constant::value && + is_trivially_copyable::value && + std::is_default_constructible::value> {}; + +} // namespace internal_casts // implicit_cast() // @@ -82,7 +106,7 @@ inline namespace lts_2018_06_20 { // // Such implicit cast chaining may be useful within template logic. 
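// (Editorial sketch, not part of this patch: implicit_cast compiles only for
// conversions the language would perform implicitly, such as a
// derived-to-base pointer upcast; the reverse downcast would not compile.)
//
//   struct Base {};
//   struct Derived : Base {};
//   Derived d;
//   Base* b = absl::implicit_cast<Base*>(&d);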
template -inline To implicit_cast(typename absl::internal::identity_t to) { +constexpr To implicit_cast(typename absl::internal::identity_t to) { return to; } @@ -126,7 +150,32 @@ inline To implicit_cast(typename absl::internal::identity_t to) { // and reading its bits back using a different type. A `bit_cast()` avoids this // issue by implementing its casts using `memcpy()`, which avoids introducing // this undefined behavior. -template +// +// NOTE: The requirements here are more strict than the bit_cast of standard +// proposal p0476 due to the need for workarounds and lack of intrinsics. +// Specifically, this implementation also requires `Dest` to be +// default-constructible. +template < + typename Dest, typename Source, + typename std::enable_if::value, + int>::type = 0> +inline Dest bit_cast(const Source& source) { + Dest dest; + memcpy(static_cast(std::addressof(dest)), + static_cast(std::addressof(source)), sizeof(dest)); + return dest; +} + +// NOTE: This overload is only picked if the requirements of bit_cast are not +// met. It is therefore UB, but is provided temporarily as previous versions of +// this function template were unchecked. Do not use this in new code. +template < + typename Dest, typename Source, + typename std::enable_if< + !internal_casts::is_bitcastable::value, int>::type = 0> +ABSL_DEPRECATED( + "absl::bit_cast type requirements were violated. Update the types being " + "used such that they are the same size and are both TriviallyCopyable.") inline Dest bit_cast(const Source& source) { static_assert(sizeof(Dest) == sizeof(Source), "Source and destination types should have equal sizes."); @@ -136,7 +185,7 @@ inline Dest bit_cast(const Source& source) { return dest; } -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_CASTS_H_ diff --git a/absl/base/config.h b/absl/base/config.h index 2f5f1595..db4c4539 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -139,12 +139,18 @@ #ifdef ABSL_HAVE_THREAD_LOCAL #error ABSL_HAVE_THREAD_LOCAL cannot be directly set #elif defined(__APPLE__) -// Notes: Xcode's clang did not support `thread_local` until version -// 8, and even then not for all iOS < 9.0. Also, Xcode 9.3 started disallowing -// `thread_local` for 32-bit iOS simulator targeting iOS 9.x. -// `__has_feature` is only supported by Clang so it has be inside +// Notes: +// * Xcode's clang did not support `thread_local` until version 8, and +// even then not for all iOS < 9.0. +// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator +// targeting iOS 9.x. +// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time +// making __has_feature unreliable there. +// +// Otherwise, `__has_feature` is only supported by Clang so it has be inside // `defined(__APPLE__)` check. -#if __has_feature(cxx_thread_local) +#if __has_feature(cxx_thread_local) && \ + !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) #define ABSL_HAVE_THREAD_LOCAL 1 #endif #else // !defined(__APPLE__) @@ -199,7 +205,7 @@ #define ABSL_HAVE_INTRINSIC_INT128 1 #elif defined(__CUDACC__) // __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a -// std::string explaining that it has been removed starting with CUDA 9. We use +// string explaining that it has been removed starting with CUDA 9. We use // nested #ifs because there is no short-circuiting in the preprocessor. // NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. 
 #if __CUDACC_VER__ >= 70000
@@ -268,7 +274,8 @@
 #error ABSL_HAVE_MMAP cannot be directly set
 #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
     defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
-    defined(__wasm__) || defined(__Fuchsia__)
+    defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
+    defined(__ASYLO__)
 #define ABSL_HAVE_MMAP 1
 #endif
@@ -322,6 +329,8 @@
 #define ABSL_HAVE_ALARM 1
 #elif defined(_MSC_VER)
 // feature tests for Microsoft's library
+#elif defined(__EMSCRIPTEN__)
+// emscripten doesn't support signals
 #elif defined(__native_client__)
 #else
 // other standard libraries
@@ -356,6 +365,18 @@
 #error "absl endian detection needs to be set up for your compiler"
 #endif
 
+// MacOS 10.13 doesn't let you use <any>, <optional>, or <variant> even though
+// the headers exist and are publicly noted to work. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
+// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
+    defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101400
+#define ABSL_INTERNAL_MACOS_HAS_CXX_17_TYPES 1
+#else
+#define ABSL_INTERNAL_MACOS_HAS_CXX_17_TYPES 0
+#endif
+
 // ABSL_HAVE_STD_ANY
 //
 // Checks whether C++17 std::any is available by checking whether <any> exists.
@@ -364,7 +385,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<any>) && __cplusplus >= 201703L
+#if __has_include(<any>) && __cplusplus >= 201703L && \
+    ABSL_INTERNAL_MACOS_HAS_CXX_17_TYPES
 #define ABSL_HAVE_STD_ANY 1
 #endif
 #endif
@@ -377,7 +399,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<optional>) && __cplusplus >= 201703L
+#if __has_include(<optional>) && __cplusplus >= 201703L && \
+    ABSL_INTERNAL_MACOS_HAS_CXX_17_TYPES
 #define ABSL_HAVE_STD_OPTIONAL 1
 #endif
 #endif
@@ -390,7 +413,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<variant>) && __cplusplus >= 201703L
+#if __has_include(<variant>) && __cplusplus >= 201703L && \
+    ABSL_INTERNAL_MACOS_HAS_CXX_17_TYPES
 #define ABSL_HAVE_STD_VARIANT 1
 #endif
 #endif
@@ -414,14 +438,21 @@
 // , is implemented) or higher. Also, `__cplusplus` is
 // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
 // version.
-// TODO(zhangxy): fix tests before enabling aliasing for `std::any`,
-// `std::string_view`.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
 #if defined(_MSC_VER) && _MSC_VER >= 1910 && \
     ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
 // #define ABSL_HAVE_STD_ANY 1
 #define ABSL_HAVE_STD_OPTIONAL 1
 #define ABSL_HAVE_STD_VARIANT 1
-// #define ABSL_HAVE_STD_STRING_VIEW 1
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
+// In debug mode, MSVC 2017's std::variant throws an EXCEPTION_ACCESS_VIOLATION
+// SEH exception from emplace for variant<SomeStruct> when constructing the
+// struct can throw. This defeats some of variant_test and
+// variant_exception_safety_test.
+#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE #endif #endif // ABSL_BASE_CONFIG_H_ diff --git a/absl/base/exception_safety_testing_test.cc b/absl/base/exception_safety_testing_test.cc index 97c8d6f8..7518264d 100644 --- a/absl/base/exception_safety_testing_test.cc +++ b/absl/base/exception_safety_testing_test.cc @@ -38,7 +38,7 @@ template void ExpectNoThrow(const F& f) { try { f(); - } catch (TestException e) { + } catch (const TestException& e) { ADD_FAILURE() << "Unexpected exception thrown from " << e.what(); } } @@ -179,7 +179,7 @@ TEST(ThrowingValueTest, ThrowingStreamOps) { } // Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit -// a nonfatal failure that contains the std::string representation of the Thrower +// a nonfatal failure that contains the string representation of the Thrower TEST(ThrowingValueTest, StreamOpsOutput) { using ::testing::TypeSpec; exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); @@ -548,21 +548,21 @@ TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) { // Test that providing operation and inveriants still does not allow for the // the invocation of .Test() and .Test(op) because it lacks a factory auto without_fac = - testing::MakeExceptionSafetyTester().WithOperation(op).WithInvariants( + testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts( inv, testing::strong_guarantee); EXPECT_FALSE(HasNullaryTest(without_fac)); EXPECT_FALSE(HasUnaryTest(without_fac)); - // Test that providing invariants and factory allows the invocation of + // Test that providing contracts and factory allows the invocation of // .Test(op) but does not allow for .Test() because it lacks an operation auto without_op = testing::MakeExceptionSafetyTester() - .WithInvariants(inv, testing::strong_guarantee) + .WithContracts(inv, testing::strong_guarantee) .WithFactory(fac); EXPECT_FALSE(HasNullaryTest(without_op)); EXPECT_TRUE(HasUnaryTest(without_op)); // Test that providing operation and factory still does not allow for the - // the invocation of .Test() and .Test(op) because it lacks invariants + // the invocation of .Test() and .Test(op) because it lacks contracts auto without_inv = testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac); EXPECT_FALSE(HasNullaryTest(without_inv)); @@ -577,7 +577,7 @@ std::unique_ptr ExampleFunctionFactory() { void ExampleFunctionOperation(ExampleStruct*) {} -testing::AssertionResult ExampleFunctionInvariant(ExampleStruct*) { +testing::AssertionResult ExampleFunctionContract(ExampleStruct*) { return testing::AssertionSuccess(); } @@ -593,16 +593,16 @@ struct { struct { testing::AssertionResult operator()(ExampleStruct* example_struct) const { - return ExampleFunctionInvariant(example_struct); + return ExampleFunctionContract(example_struct); } -} example_struct_invariant; +} example_struct_contract; auto example_lambda_factory = []() { return ExampleFunctionFactory(); }; auto example_lambda_operation = [](ExampleStruct*) {}; -auto example_lambda_invariant = [](ExampleStruct* example_struct) { - return ExampleFunctionInvariant(example_struct); +auto example_lambda_contract = [](ExampleStruct* example_struct) { + return ExampleFunctionContract(example_struct); }; // Testing that function references, pointers, structs with operator() and @@ -612,28 +612,28 @@ TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) { EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(ExampleFunctionFactory) 
.WithOperation(ExampleFunctionOperation) - .WithInvariants(ExampleFunctionInvariant) + .WithContracts(ExampleFunctionContract) .Test()); // function pointer EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(&ExampleFunctionFactory) .WithOperation(&ExampleFunctionOperation) - .WithInvariants(&ExampleFunctionInvariant) + .WithContracts(&ExampleFunctionContract) .Test()); // struct EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(example_struct_factory) .WithOperation(example_struct_operation) - .WithInvariants(example_struct_invariant) + .WithContracts(example_struct_contract) .Test()); // lambda EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(example_lambda_factory) .WithOperation(example_lambda_operation) - .WithInvariants(example_lambda_invariant) + .WithContracts(example_lambda_contract) .Test()); } @@ -658,9 +658,9 @@ struct { } invoker; auto tester = - testing::MakeExceptionSafetyTester().WithOperation(invoker).WithInvariants( + testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts( CheckNonNegativeInvariants); -auto strong_tester = tester.WithInvariants(testing::strong_guarantee); +auto strong_tester = tester.WithContracts(testing::strong_guarantee); struct FailsBasicGuarantee : public NonNegative { void operator()() { @@ -690,7 +690,7 @@ TEST(ExceptionCheckTest, StrongGuaranteeFailure) { EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); } -struct BasicGuaranteeWithExtraInvariants : public NonNegative { +struct BasicGuaranteeWithExtraContracts : public NonNegative { // After operator(), i is incremented. If operator() throws, i is set to 9999 void operator()() { int old_i = i; @@ -701,21 +701,21 @@ struct BasicGuaranteeWithExtraInvariants : public NonNegative { static constexpr int kExceptionSentinel = 9999; }; -constexpr int BasicGuaranteeWithExtraInvariants::kExceptionSentinel; +constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; -TEST(ExceptionCheckTest, BasicGuaranteeWithExtraInvariants) { +TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { auto tester_with_val = - tester.WithInitialValue(BasicGuaranteeWithExtraInvariants{}); + tester.WithInitialValue(BasicGuaranteeWithExtraContracts{}); EXPECT_TRUE(tester_with_val.Test()); EXPECT_TRUE( tester_with_val - .WithInvariants([](BasicGuaranteeWithExtraInvariants* o) { - if (o->i == BasicGuaranteeWithExtraInvariants::kExceptionSentinel) { + .WithContracts([](BasicGuaranteeWithExtraContracts* o) { + if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) { return testing::AssertionSuccess(); } return testing::AssertionFailure() << "i should be " - << BasicGuaranteeWithExtraInvariants::kExceptionSentinel + << BasicGuaranteeWithExtraContracts::kExceptionSentinel << ", but is " << o->i; }) .Test()); @@ -740,7 +740,7 @@ struct HasReset : public NonNegative { void reset() { i = 0; } }; -testing::AssertionResult CheckHasResetInvariants(HasReset* h) { +testing::AssertionResult CheckHasResetContracts(HasReset* h) { h->reset(); return testing::AssertionResult(h->i == 0); } @@ -759,17 +759,29 @@ TEST(ExceptionCheckTest, ModifyingChecker) { }; EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{}) - .WithInvariants(set_to_1000, is_1000) + .WithContracts(set_to_1000, is_1000) .Test()); EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}) - .WithInvariants(increment) + .WithContracts(increment) .Test()); EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithInitialValue(HasReset{}) - 
.WithInvariants(CheckHasResetInvariants) + .WithContracts(CheckHasResetContracts) .Test(invoker)); } +TEST(ExceptionSafetyTesterTest, ResetsCountdown) { + auto test = + testing::MakeExceptionSafetyTester() + .WithInitialValue(ThrowingValue<>()) + .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); }) + .WithOperation([](ThrowingValue<>*) {}); + ASSERT_TRUE(test.Test()); + // If the countdown isn't reset because there were no exceptions thrown, then + // this will fail with a termination from an unhandled exception + EXPECT_TRUE(test.Test()); +} + struct NonCopyable : public NonNegative { NonCopyable(const NonCopyable&) = delete; NonCopyable() : NonNegative{0} {} @@ -799,7 +811,7 @@ TEST(ExceptionCheckTest, NonEqualityComparable) { return testing::AssertionResult(nec->i == NonEqualityComparable().i); }; auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{}) - .WithInvariants(nec_is_strong); + .WithContracts(nec_is_strong); EXPECT_TRUE(strong_nec_tester.Test()); EXPECT_FALSE(strong_nec_tester.Test( @@ -833,14 +845,14 @@ struct { testing::AssertionResult operator()(ExhaustivenessTester*) const { return testing::AssertionSuccess(); } -} CheckExhaustivenessTesterInvariants; +} CheckExhaustivenessTesterContracts; template unsigned char ExhaustivenessTester::successes = 0; TEST(ExceptionCheckTest, Exhaustiveness) { auto exhaust_tester = testing::MakeExceptionSafetyTester() - .WithInvariants(CheckExhaustivenessTesterInvariants) + .WithContracts(CheckExhaustivenessTesterContracts) .WithOperation(invoker); EXPECT_TRUE( @@ -849,7 +861,7 @@ TEST(ExceptionCheckTest, Exhaustiveness) { EXPECT_TRUE( exhaust_tester.WithInitialValue(ExhaustivenessTester>{}) - .WithInvariants(testing::strong_guarantee) + .WithContracts(testing::strong_guarantee) .Test()); EXPECT_EQ(ExhaustivenessTester>::successes, 0xF); } @@ -931,8 +943,8 @@ TEST(ThrowingValueTraitsTest, RelationalOperators) { } TEST(ThrowingAllocatorTraitsTest, Assignablility) { - EXPECT_TRUE(std::is_move_assignable>::value); - EXPECT_TRUE(std::is_copy_assignable>::value); + EXPECT_TRUE(absl::is_move_assignable>::value); + EXPECT_TRUE(absl::is_copy_assignable>::value); EXPECT_TRUE(std::is_nothrow_move_assignable>::value); EXPECT_TRUE(std::is_nothrow_copy_assignable>::value); } diff --git a/absl/base/inline_variable_test.cc b/absl/base/inline_variable_test.cc index b34aebd8..b968b10f 100644 --- a/absl/base/inline_variable_test.cc +++ b/absl/base/inline_variable_test.cc @@ -20,7 +20,7 @@ #include "gtest/gtest.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace inline_variable_testing_internal { namespace { @@ -60,5 +60,5 @@ TEST(InlineVariableTest, FunPtrType) { } // namespace } // namespace inline_variable_testing_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/inline_variable_test_a.cc b/absl/base/inline_variable_test_a.cc index 0ea363af..a51b1d81 100644 --- a/absl/base/inline_variable_test_a.cc +++ b/absl/base/inline_variable_test_a.cc @@ -15,7 +15,7 @@ #include "absl/base/internal/inline_variable_testing.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace inline_variable_testing_internal { const Foo& get_foo_a() { return inline_variable_foo; } @@ -23,5 +23,5 @@ const Foo& get_foo_a() { return inline_variable_foo; } const int& get_int_a() { return inline_variable_int; } } // namespace inline_variable_testing_internal -} // inline namespace 
lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/inline_variable_test_b.cc b/absl/base/inline_variable_test_b.cc index 32704cf1..5041e20a 100644 --- a/absl/base/inline_variable_test_b.cc +++ b/absl/base/inline_variable_test_b.cc @@ -15,7 +15,7 @@ #include "absl/base/internal/inline_variable_testing.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace inline_variable_testing_internal { const Foo& get_foo_b() { return inline_variable_foo; } @@ -23,5 +23,5 @@ const Foo& get_foo_b() { return inline_variable_foo; } const int& get_int_b() { return inline_variable_int; } } // namespace inline_variable_testing_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/atomic_hook.h b/absl/base/internal/atomic_hook.h index 7d0ee2fa..58ddf272 100644 --- a/absl/base/internal/atomic_hook.h +++ b/absl/base/internal/atomic_hook.h @@ -28,7 +28,7 @@ #endif namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { template @@ -161,7 +161,7 @@ class AtomicHook { #undef ABSL_HAVE_WORKING_ATOMIC_POINTER } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/absl/base/internal/bits.h b/absl/base/internal/bits.h new file mode 100644 index 00000000..29657426 --- /dev/null +++ b/absl/base/internal/bits.h @@ -0,0 +1,195 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_BITS_H_ +#define ABSL_BASE_INTERNAL_BITS_H_ + +// This file contains bitwise ops which are implementation details of various +// absl libraries. + +#include + +// Clang on Windows has __builtin_clzll; otherwise we need to use the +// windows intrinsic functions. +#if defined(_MSC_VER) +#include +#if defined(_M_X64) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) +#endif +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#endif + +#include "absl/base/attributes.h" + +#if defined(_MSC_VER) +// We can achieve something similar to attribute((always_inline)) with MSVC by +// using the __forceinline keyword, however this is not perfect. MSVC is +// much less aggressive about inlining, and even with the __forceinline keyword. +#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline +#else +// Use default attribute inline. 
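+// (On GCC and Clang, ABSL_ATTRIBUTE_ALWAYS_INLINE expands to
+// __attribute__((always_inline)); paired with plain `inline` below, it keeps
+// these small bit-twiddling helpers from becoming out-of-line calls on hot
+// paths.)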
+#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace base_internal {
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
+  int zeroes = 60;
+  if (n >> 32) zeroes -= 32, n >>= 32;
+  if (n >> 16) zeroes -= 16, n >>= 16;
+  if (n >> 8) zeroes -= 8, n >>= 8;
+  if (n >> 4) zeroes -= 4, n >>= 4;
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
+#if defined(_MSC_VER) && defined(_M_X64)
+  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse64(&result, n)) {
+    return 63 - result;
+  }
+  return 64;
+#elif defined(_MSC_VER)
+  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
+    return 31 - result;
+  }
+  if (_BitScanReverse(&result, n)) {
+    return 63 - result;
+  }
+  return 64;
+#elif defined(__GNUC__)
+  // Use __builtin_clzll, which uses the following instructions:
+  //  x86: bsr
+  //  ARM64: clz
+  //  PPC: cntlzd
+  static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
+                "__builtin_clzll does not take 64-bit arg");
+
+  // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+  if (n == 0) {
+    return 64;
+  }
+  return __builtin_clzll(n);
+#else
+  return CountLeadingZeros64Slow(n);
+#endif
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
+  int zeroes = 28;
+  if (n >> 16) zeroes -= 16, n >>= 16;
+  if (n >> 8) zeroes -= 8, n >>= 8;
+  if (n >> 4) zeroes -= 4, n >>= 4;
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
+#if defined(_MSC_VER)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse(&result, n)) {
+    return 31 - result;
+  }
+  return 32;
+#elif defined(__GNUC__)
+  // Use __builtin_clz, which uses the following instructions:
+  //  x86: bsr
+  //  ARM64: clz
+  //  PPC: cntlzw
+  static_assert(sizeof(int) == sizeof(n),
+                "__builtin_clz does not take 32-bit arg");
+
+  // Handle 0 as a special case because __builtin_clz(0) is undefined.
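+  // (GCC documents the result for a zero input as undefined; on x86 the
+  // underlying BSR instruction leaves its destination unmodified/undefined
+  // when the source is zero.)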
+ if (n == 0) { + return 32; + } + return __builtin_clz(n); +#else + return CountLeadingZeros32Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { + int c = 63; + n &= ~n + 1; + if (n & 0x00000000FFFFFFFF) c -= 32; + if (n & 0x0000FFFF0000FFFF) c -= 16; + if (n & 0x00FF00FF00FF00FF) c -= 8; + if (n & 0x0F0F0F0F0F0F0F0F) c -= 4; + if (n & 0x3333333333333333) c -= 2; + if (n & 0x5555555555555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { +#if defined(_MSC_VER) && defined(_M_X64) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward64(&result, n); + return result; +#elif defined(_MSC_VER) + unsigned long result = 0; // NOLINT(runtime/int) + if (static_cast(n) == 0) { + _BitScanForward(&result, n >> 32); + return result + 32; + } + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_ctzll does not take 64-bit arg"); + return __builtin_ctzll(n); +#else + return CountTrailingZerosNonZero64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { + int c = 31; + n &= ~n + 1; + if (n & 0x0000FFFF) c -= 16; + if (n & 0x00FF00FF) c -= 8; + if (n & 0x0F0F0F0F) c -= 4; + if (n & 0x33333333) c -= 2; + if (n & 0x55555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { +#if defined(_MSC_VER) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) + static_assert(sizeof(int) == sizeof(n), + "__builtin_ctz does not take 32-bit arg"); + return __builtin_ctz(n); +#else + return CountTrailingZerosNonZero32Slow(n); +#endif +} + +#undef ABSL_BASE_INTERNAL_FORCEINLINE + +} // namespace base_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_BITS_H_ diff --git a/absl/base/internal/bits_test.cc b/absl/base/internal/bits_test.cc new file mode 100644 index 00000000..e5d991d6 --- /dev/null +++ b/absl/base/internal/bits_test.cc @@ -0,0 +1,97 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
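+
+// A hedged orientation sketch (not part of this patch; helper names are
+// hypothetical): the counting primitives compose into common bit queries,
+// which the tests below exercise indirectly.
+//
+//   int BitWidth64(uint64_t n) {  // smallest k such that n < 2^k
+//     return 64 - absl::base_internal::CountLeadingZeros64(n);
+//   }
+//
+//   uint64_t LowestSetBit64(uint64_t n) {  // n & ~(n - 1), for nonzero n
+//     return uint64_t{1}
+//            << absl::base_internal::CountTrailingZerosNonZero64(n);
+//   }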
+ +#include "absl/base/internal/bits.h" + +#include "gtest/gtest.h" + +namespace { + +int CLZ64(uint64_t n) { + int fast = absl::base_internal::CountLeadingZeros64(n); + int slow = absl::base_internal::CountLeadingZeros64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros64) { + EXPECT_EQ(64, CLZ64(uint64_t{})); + EXPECT_EQ(0, CLZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = 63 - index; + ASSERT_EQ(cnt, CLZ64(x)) << index; + ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index; + } +} + +int CLZ32(uint32_t n) { + int fast = absl::base_internal::CountLeadingZeros32(n); + int slow = absl::base_internal::CountLeadingZeros32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros32) { + EXPECT_EQ(32, CLZ32(uint32_t{})); + EXPECT_EQ(0, CLZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast(1) << index; + const auto cnt = 31 - index; + ASSERT_EQ(cnt, CLZ32(x)) << index; + ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index; + ASSERT_EQ(CLZ64(x), CLZ32(x) + 32); + } +} + +int CTZ64(uint64_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero64(n); + int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero64) { + EXPECT_EQ(0, CTZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ64(x)) << index; + ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index; + } +} + +int CTZ32(uint32_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero32(n); + int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero32) { + EXPECT_EQ(0, CTZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ32(x)) << index; + ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index; + } +} + + +} // namespace diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc index a4f1fc21..d99b63d3 100644 --- a/absl/base/internal/cycleclock.cc +++ b/absl/base/internal/cycleclock.cc @@ -27,7 +27,7 @@ #include "absl/base/internal/unscaledcycleclock.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { #if ABSL_USE_UNSCALED_CYCLECLOCK @@ -79,5 +79,5 @@ double CycleClock::Frequency() { #endif } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h index 5fd8b348..ae5ede3e 100644 --- a/absl/base/internal/cycleclock.h +++ b/absl/base/internal/cycleclock.h @@ -46,7 +46,7 @@ #include namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // ----------------------------------------------------------------------------- @@ -73,7 +73,7 @@ class CycleClock { }; } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h index e98c9960..f064e363 100644 --- a/absl/base/internal/direct_mmap.h +++ 
b/absl/base/internal/direct_mmap.h @@ -62,7 +62,7 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); #endif // __BIONIC__ namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // Platform specific logic extracted from @@ -76,7 +76,11 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, // On these architectures, implement mmap with mmap2. static int pagesize = 0; if (pagesize == 0) { +#if defined(__wasm__) || defined(__asmjs__) pagesize = getpagesize(); +#else + pagesize = sysconf(_SC_PAGESIZE); +#endif } if (offset < 0 || offset % pagesize != 0) { errno = EINVAL; @@ -93,11 +97,13 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, #endif #elif defined(__s390x__) // On s390x, mmap() arguments are passed in memory. - uint32_t buf[6] = { - reinterpret_cast(start), static_cast(length), - static_cast(prot), static_cast(flags), - static_cast(fd), static_cast(offset)}; - return reintrepret_cast(syscall(SYS_mmap, buf)); + unsigned long buf[6] = {reinterpret_cast(start), // NOLINT + static_cast(length), // NOLINT + static_cast(prot), // NOLINT + static_cast(flags), // NOLINT + static_cast(fd), // NOLINT + static_cast(offset)}; // NOLINT + return reinterpret_cast(syscall(SYS_mmap, buf)); #elif defined(__x86_64__) // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. // We need to explicitly cast to an unsigned 64 bit type to avoid implicit @@ -123,7 +129,7 @@ inline int DirectMunmap(void* start, size_t length) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #else // !__linux__ @@ -132,7 +138,7 @@ inline int DirectMunmap(void* start, size_t length) { // actual mmap()/munmap() methods. namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, @@ -145,7 +151,7 @@ inline int DirectMunmap(void* start, size_t length) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // __linux__ diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h index 00447110..52c09c5b 100644 --- a/absl/base/internal/endian.h +++ b/absl/base/internal/endian.h @@ -34,7 +34,7 @@ #include "absl/base/port.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { // Use compiler byte-swapping intrinsics if they are available. 32-bit // and 64-bit versions are available in Clang and GCC as of GCC 4.3.0. 
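//
// As a usage sketch (hedged; the helper name is hypothetical, not part of
// this header): the Load/Store wrappers defined below let callers read
// fixed-endian wire formats without hand-written shifts, e.g.
//
//   uint64_t ReadLittleEndian64(const char* wire_bytes) {
//     // Swaps bytes only on big-endian hosts; a plain load elsewhere.
//     return absl::little_endian::Load64(wire_bytes);
//   }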
@@ -83,14 +83,14 @@ inline uint64_t gbswap_64(uint64_t host_int) { #elif defined(__GLIBC__) return bswap_64(host_int); #else - return (((x & uint64_t{(0xFF}) << 56) | - ((x & uint64_t{(0xFF00}) << 40) | - ((x & uint64_t{(0xFF0000}) << 24) | - ((x & uint64_t{(0xFF000000}) << 8) | - ((x & uint64_t{(0xFF00000000}) >> 8) | - ((x & uint64_t{(0xFF0000000000}) >> 24) | - ((x & uint64_t{(0xFF000000000000}) >> 40) | - ((x & uint64_t{(0xFF00000000000000}) >> 56)); + return (((host_int & uint64_t{0xFF}) << 56) | + ((host_int & uint64_t{0xFF00}) << 40) | + ((host_int & uint64_t{0xFF0000}) << 24) | + ((host_int & uint64_t{0xFF000000}) << 8) | + ((host_int & uint64_t{0xFF00000000}) >> 8) | + ((host_int & uint64_t{0xFF0000000000}) >> 24) | + ((host_int & uint64_t{0xFF000000000000}) >> 40) | + ((host_int & uint64_t{0xFF00000000000000}) >> 56)); #endif // bswap_64 } @@ -98,8 +98,10 @@ inline uint32_t gbswap_32(uint32_t host_int) { #if defined(__GLIBC__) return bswap_32(host_int); #else - return (((x & 0xFF) << 24) | ((x & 0xFF00) << 8) | ((x & 0xFF0000) >> 8) | - ((x & 0xFF000000) >> 24)); + return (((host_int & uint32_t{0xFF}) << 24) | + ((host_int & uint32_t{0xFF00}) << 8) | + ((host_int & uint32_t{0xFF0000}) >> 8) | + ((host_int & uint32_t{0xFF000000}) >> 24)); #endif } @@ -107,7 +109,8 @@ inline uint16_t gbswap_16(uint16_t host_int) { #if defined(__GLIBC__) return bswap_16(host_int); #else - return uint16_t{((x & 0xFF) << 8) | ((x & 0xFF00) >> 8)}; + return (((host_int & uint16_t{0xFF}) << 8) | + ((host_int & uint16_t{0xFF00}) >> 8)); #endif } @@ -265,7 +268,7 @@ inline void Store64(void *p, uint64_t v) { } // namespace big_endian -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/absl/base/internal/endian_test.cc b/absl/base/internal/endian_test.cc index 66ccd45a..14ac4765 100644 --- a/absl/base/internal/endian_test.cc +++ b/absl/base/internal/endian_test.cc @@ -24,7 +24,7 @@ #include "absl/base/config.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace { const uint64_t kInitialNumber{0x0123456789abcdef}; @@ -34,32 +34,16 @@ const uint16_t k16Value{0x0123}; const int kNumValuesToTest = 1000000; const int kRandomSeed = 12345; -#ifdef ABSL_IS_BIG_ENDIAN +#if defined(ABSL_IS_BIG_ENDIAN) const uint64_t kInitialInNetworkOrder{kInitialNumber}; const uint64_t k64ValueLE{0xefcdab8967452301}; const uint32_t k32ValueLE{0x67452301}; const uint16_t k16ValueLE{0x2301}; -const uint8_t k8ValueLE{k8Value}; -const uint64_t k64IValueLE{0xefcdab89674523a1}; -const uint32_t k32IValueLE{0x67452391}; -const uint16_t k16IValueLE{0x85ff}; -const uint8_t k8IValueLE{0xff}; -const uint64_t kDoubleValueLE{0x6e861bf0f9210940}; -const uint32_t kFloatValueLE{0xd00f4940}; -const uint8_t kBoolValueLE{0x1}; const uint64_t k64ValueBE{kInitialNumber}; const uint32_t k32ValueBE{k32Value}; const uint16_t k16ValueBE{k16Value}; -const uint8_t k8ValueBE{k8Value}; -const uint64_t k64IValueBE{0xa123456789abcdef}; -const uint32_t k32IValueBE{0x91234567}; -const uint16_t k16IValueBE{0xff85}; -const uint8_t k8IValueBE{0xff}; -const uint64_t kDoubleValueBE{0x400921f9f01b866e}; -const uint32_t kFloatValueBE{0x40490fd0}; -const uint8_t kBoolValueBE{0x1}; -#elif defined ABSL_IS_LITTLE_ENDIAN +#elif defined(ABSL_IS_LITTLE_ENDIAN) const uint64_t kInitialInNetworkOrder{0xefcdab8967452301}; const uint64_t k64ValueLE{kInitialNumber}; const uint32_t k32ValueLE{k32Value}; @@ -277,5 +261,5 @@ TEST(EndianessTest, 
big_endian) { } } // namespace -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/exception_safety_testing.cc b/absl/base/internal/exception_safety_testing.cc index f1d081f7..8207b7d7 100644 --- a/absl/base/internal/exception_safety_testing.cc +++ b/absl/base/internal/exception_safety_testing.cc @@ -23,6 +23,10 @@ exceptions_internal::NoThrowTag nothrow_ctor; exceptions_internal::StrongGuaranteeTagType strong_guarantee; +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() { + return {}; +} + namespace exceptions_internal { int countdown = -1; diff --git a/absl/base/internal/exception_safety_testing.h b/absl/base/internal/exception_safety_testing.h index 8c2f5093..d4d41a8a 100644 --- a/absl/base/internal/exception_safety_testing.h +++ b/absl/base/internal/exception_safety_testing.h @@ -33,7 +33,7 @@ #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" -#include "absl/types/optional.h" +#include "absl/utility/utility.h" namespace testing { @@ -127,10 +127,8 @@ class ConstructorTracker { void* address = it.first; TrackedAddress& tracked_address = it.second; if (tracked_address.is_alive) { - ADD_FAILURE() << "Object at address " << address - << " with countdown of " << countdown_ - << " was not destroyed [" << tracked_address.description - << "]"; + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + countdown_, "Object was not destroyed."); } } } @@ -141,11 +139,11 @@ class ConstructorTracker { TrackedAddress& tracked_address = current_tracker_instance_->address_map_[address]; if (tracked_address.is_alive) { - ADD_FAILURE() << "Object at address " << address << " with countdown of " - << current_tracker_instance_->countdown_ - << " was re-constructed. Previously: [" - << tracked_address.description << "] Now: [" << description - << "]"; + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-constructed. 
Current object was constructed by " + + description); } tracked_address = {true, std::move(description)}; } @@ -159,10 +157,9 @@ class ConstructorTracker { TrackedAddress& tracked_address = it->second; if (!tracked_address.is_alive) { - ADD_FAILURE() << "Object at address " << address << " with countdown of " - << current_tracker_instance_->countdown_ - << " was re-destroyed or created prior to construction " - << "tracking [" << tracked_address.description << "]"; + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-destroyed."); } tracked_address.is_alive = false; } @@ -172,6 +169,16 @@ class ConstructorTracker { return current_tracker_instance_ != nullptr; } + static std::string ErrorMessage(void* address, const std::string& address_description, + int countdown, const std::string& error_description) { + return absl::Substitute( + "With coundtown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, error_description, address_description, address); + } + std::unordered_map address_map_; int countdown_; @@ -190,70 +197,6 @@ class TrackedObject { ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } }; - -template -absl::optional TestSingleInvariantAtCountdownImpl( - const Factory& factory, const Operation& operation, int count, - const Invariant& invariant) { - auto t_ptr = factory(); - absl::optional current_res; - SetCountdown(count); - try { - operation(t_ptr.get()); - } catch (const exceptions_internal::TestException& e) { - current_res.emplace(invariant(t_ptr.get())); - if (!current_res.value()) { - *current_res << e.what() << " failed invariant check"; - } - } - UnsetCountdown(); - return current_res; -} - -template -absl::optional TestSingleInvariantAtCountdownImpl( - const Factory& factory, const Operation& operation, int count, - StrongGuaranteeTagType) { - using TPtr = typename decltype(factory())::pointer; - auto t_is_strong = [&](TPtr t) { return *t == *factory(); }; - return TestSingleInvariantAtCountdownImpl(factory, operation, count, - t_is_strong); -} - -template -int TestSingleInvariantAtCountdown( - const Factory& factory, const Operation& operation, int count, - const Invariant& invariant, - absl::optional* reduced_res) { - // If reduced_res is empty, it means the current call to - // TestSingleInvariantAtCountdown(...) is the first test being run so we do - // want to run it. Alternatively, if it's not empty (meaning a previous test - // has run) we want to check if it passed. If the previous test did pass, we - // want to contine running tests so we do want to run the current one. If it - // failed, we want to short circuit so as not to overwrite the AssertionResult - // output. If that's the case, we do not run the current test and instead we - // simply return. - if (!reduced_res->has_value() || reduced_res->value()) { - *reduced_res = TestSingleInvariantAtCountdownImpl(factory, operation, count, - invariant); - } - return 0; -} - -template -inline absl::optional TestAllInvariantsAtCountdown( - const Factory& factory, const Operation& operation, int count, - const Invariants&... 
invariants) { - absl::optional reduced_res; - - // Run each checker, short circuiting after the first failure - int dummy[] = { - 0, (TestSingleInvariantAtCountdown(factory, operation, count, invariants, - &reduced_res))...}; - static_cast(dummy); - return reduced_res; -} - } // namespace exceptions_internal extern exceptions_internal::NoThrowTag nothrow_ctor; @@ -773,7 +716,7 @@ class ThrowingAllocator : private exceptions_internal::TrackedObject { } size_type max_size() const noexcept { - return std::numeric_limits::max() / sizeof(value_type); + return (std::numeric_limits::max)() / sizeof(value_type); } ThrowingAllocator select_on_container_copy_construction() noexcept( @@ -858,7 +801,7 @@ testing::AssertionResult TestNothrowOp(const Operation& operation) { try { operation(); return testing::AssertionSuccess(); - } catch (exceptions_internal::TestException) { + } catch (const exceptions_internal::TestException&) { return testing::AssertionFailure() << "TestException thrown during call to operation() when nothrow " "guarantee was expected."; @@ -871,7 +814,7 @@ testing::AssertionResult TestNothrowOp(const Operation& operation) { namespace exceptions_internal { -// Dummy struct for ExceptionSafetyTester<> partial state. +// Dummy struct for ExceptionSafetyTestBuilder<> partial state. struct UninitializedT {}; template @@ -884,29 +827,106 @@ class DefaultFactory { T t_; }; -template using EnableIfTestable = typename absl::enable_if_t< - LazyInvariantsCount != 0 && + LazyContractsCount != 0 && !std::is_same::value && !std::is_same::value>; template -class ExceptionSafetyTester; + typename Operation = UninitializedT, typename... Contracts> +class ExceptionSafetyTestBuilder; } // namespace exceptions_internal -exceptions_internal::ExceptionSafetyTester<> MakeExceptionSafetyTester(); +/* + * Constructs an empty ExceptionSafetyTestBuilder. All + * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation + * methods return new instances of ExceptionSafetyTestBuilder. + * + * In order to test a T for exception safety, a factory for that T, a testable + * operation, and at least one contract callback returning an assertion + * result must be applied using the respective methods. + */ +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); namespace exceptions_internal { +template +struct IsUniquePtr : std::false_type {}; + +template +struct IsUniquePtr> : std::true_type {}; + +template +struct FactoryPtrTypeHelper { + using type = decltype(std::declval()()); + + static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); +}; + +template +using FactoryPtrType = typename FactoryPtrTypeHelper::type; + +template +using FactoryElementType = typename FactoryPtrType::element_type; + +template +class ExceptionSafetyTest { + using Factory = std::function()>; + using Operation = std::function; + using Contract = std::function; + + public: + template + explicit ExceptionSafetyTest(const Factory& f, const Operation& op, + const Contracts&... contracts) + : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {} + + AssertionResult Test() const { + for (int count = 0;; ++count) { + exceptions_internal::ConstructorTracker ct(count); + + for (const auto& contract : contracts_) { + auto t_ptr = factory_(); + try { + SetCountdown(count); + operation_(t_ptr.get()); + // Unset for the case that the operation throws no exceptions, which + // would leave the countdown set and break the *next* exception safety + // test after this one. 
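+        // (Countdown mechanics, for orientation: on iteration `count` of the
+        // enclosing loop, SetCountdown(count) lets `count` throwing call
+        // sites succeed before TestException is raised, so successive
+        // iterations probe every intermediate state until the operation
+        // finally completes without throwing.)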
+ UnsetCountdown(); + return AssertionSuccess(); + } catch (const exceptions_internal::TestException& e) { + if (!contract(t_ptr.get())) { + return AssertionFailure() << e.what() << " failed contract check"; + } + } + } + } + } + + private: + template + Contract WrapContract(const ContractFn& contract) { + return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); }; + } + + Contract WrapContract(StrongGuaranteeTagType) { + return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); }; + } + + Factory factory_; + Operation operation_; + std::vector contracts_; +}; /* * Builds a tester object that tests if performing a operation on a T follows - * exception safety guarantees. Verification is done via invariant assertion + * exception safety guarantees. Verification is done via contract assertion * callbacks applied to T instances post-throw. * - * Template parameters for ExceptionSafetyTester: + * Template parameters for ExceptionSafetyTestBuilder: * * - Factory: The factory object (passed in via tester.WithFactory(...) or * tester.WithInitialValue(...)) must be invocable with the signature @@ -921,25 +941,25 @@ namespace exceptions_internal { * fresh T instance so it's free to modify and destroy the T instances as it * pleases. * - * - Invariants...: The invariant assertion callback objects (passed in via - * tester.WithInvariants(...)) must be invocable with the signature + * - Contracts...: The contract assertion callback objects (passed in via + * tester.WithContracts(...)) must be invocable with the signature * `testing::AssertionResult operator()(T*) const` where T is the type being - * tested. Invariant assertion callbacks are provided T instances post-throw. - * They must return testing::AssertionSuccess when the type invariants of the - * provided T instance hold. If the type invariants of the T instance do not + * tested. Contract assertion callbacks are provided T instances post-throw. + * They must return testing::AssertionSuccess when the type contracts of the + * provided T instance hold. If the type contracts of the T instance do not * hold, they must return testing::AssertionFailure. Execution order of - * Invariants... is unspecified. They will each individually get a fresh T + * Contracts... is unspecified. They will each individually get a fresh T * instance so they are free to modify and destroy the T instances as they * please. */ -template -class ExceptionSafetyTester { +template +class ExceptionSafetyTestBuilder { public: /* - * Returns a new ExceptionSafetyTester with an included T factory based on the - * provided T instance. The existing factory will not be included in the newly - * created tester instance. The created factory returns a new T instance by - * copy-constructing the provided const T& t. + * Returns a new ExceptionSafetyTestBuilder with an included T factory based + * on the provided T instance. The existing factory will not be included in + * the newly created tester instance. The created factory returns a new T + * instance by copy-constructing the provided const T& t. * * Preconditions for tester.WithInitialValue(const T& t): * @@ -948,63 +968,63 @@ class ExceptionSafetyTester { * tester.WithFactory(...). */ template - ExceptionSafetyTester, Operation, Invariants...> + ExceptionSafetyTestBuilder, Operation, Contracts...> WithInitialValue(const T& t) const { return WithFactory(DefaultFactory(t)); } /* - * Returns a new ExceptionSafetyTester with the provided T factory included. 
- * The existing factory will not be included in the newly-created tester - * instance. This method is intended for use with types lacking a copy + * Returns a new ExceptionSafetyTestBuilder with the provided T factory + * included. The existing factory will not be included in the newly-created + * tester instance. This method is intended for use with types lacking a copy * constructor. Types that can be copy-constructed should instead use the * method tester.WithInitialValue(...). */ template - ExceptionSafetyTester, Operation, Invariants...> + ExceptionSafetyTestBuilder, Operation, Contracts...> WithFactory(const NewFactory& new_factory) const { - return {new_factory, operation_, invariants_}; + return {new_factory, operation_, contracts_}; } /* - * Returns a new ExceptionSafetyTester with the provided testable operation - * included. The existing operation will not be included in the newly created - * tester. + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. */ template - ExceptionSafetyTester, Invariants...> + ExceptionSafetyTestBuilder, Contracts...> WithOperation(const NewOperation& new_operation) const { - return {factory_, new_operation, invariants_}; + return {factory_, new_operation, contracts_}; } /* - * Returns a new ExceptionSafetyTester with the provided MoreInvariants... - * combined with the Invariants... that were already included in the instance - * on which the method was called. Invariants... cannot be removed or replaced - * once added to an ExceptionSafetyTester instance. A fresh object must be - * created in order to get an empty Invariants... list. + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. * - * In addition to passing in custom invariant assertion callbacks, this method + * In addition to passing in custom contract assertion callbacks, this method * accepts `testing::strong_guarantee` as an argument which checks T instances * post-throw against freshly created T instances via operator== to verify * that any state changes made during the execution of the operation were * properly rolled back. */ - template - ExceptionSafetyTester...> - WithInvariants(const MoreInvariants&... more_invariants) const { - return {factory_, operation_, - std::tuple_cat(invariants_, - std::tuple...>( - more_invariants...))}; + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const { + return { + factory_, operation_, + std::tuple_cat(contracts_, std::tuple...>( + more_contracts...))}; } /* * Returns a testing::AssertionResult that is the reduced result of the * exception safety algorithm. The algorithm short circuits and returns - * AssertionFailure after the first invariant callback returns an - * AssertionFailure. Otherwise, if all invariant callbacks return an + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an * AssertionSuccess, the reduced result is AssertionSuccess. 
* * The passed-in testable operation will not be saved in a new tester instance @@ -1013,97 +1033,62 @@ class ExceptionSafetyTester { * * Preconditions for tester.Test(const NewOperation& new_operation): * - * - May only be called after at least one invariant assertion callback and a + * - May only be called after at least one contract assertion callback and a * factory or initial value have been provided. */ template < typename NewOperation, - typename = EnableIfTestable> + typename = EnableIfTestable> testing::AssertionResult Test(const NewOperation& new_operation) const { - return TestImpl(new_operation, absl::index_sequence_for()); + return TestImpl(new_operation, absl::index_sequence_for()); } /* * Returns a testing::AssertionResult that is the reduced result of the * exception safety algorithm. The algorithm short circuits and returns - * AssertionFailure after the first invariant callback returns an - * AssertionFailure. Otherwise, if all invariant callbacks return an + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an * AssertionSuccess, the reduced result is AssertionSuccess. * * Preconditions for tester.Test(): * - * - May only be called after at least one invariant assertion callback, a + * - May only be called after at least one contract assertion callback, a * factory or initial value and a testable operation have been provided. */ - template > + template < + typename LazyOperation = Operation, + typename = EnableIfTestable> testing::AssertionResult Test() const { - return TestImpl(operation_, absl::index_sequence_for()); + return Test(operation_); } private: template - friend class ExceptionSafetyTester; + friend class ExceptionSafetyTestBuilder; - friend ExceptionSafetyTester<> testing::MakeExceptionSafetyTester(); + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); - ExceptionSafetyTester() {} + ExceptionSafetyTestBuilder() {} - ExceptionSafetyTester(const Factory& f, const Operation& o, - const std::tuple& i) - : factory_(f), operation_(o), invariants_(i) {} + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, + const std::tuple& i) + : factory_(f), operation_(o), contracts_(i) {} template - testing::AssertionResult TestImpl(const SelectedOperation& selected_operation, + testing::AssertionResult TestImpl(SelectedOperation selected_operation, absl::index_sequence) const { - // Starting from 0 and counting upwards until one of the exit conditions is - // hit... - for (int count = 0;; ++count) { - exceptions_internal::ConstructorTracker ct(count); - - // Run the full exception safety test algorithm for the current countdown - auto reduced_res = - TestAllInvariantsAtCountdown(factory_, selected_operation, count, - std::get(invariants_)...); - // If there is no value in the optional, no invariants were run because no - // exception was thrown. This means that the test is complete and the loop - // can exit successfully. - if (!reduced_res.has_value()) { - return testing::AssertionSuccess(); - } - // If the optional is not empty and the value is falsy, an invariant check - // failed so the test must exit to propegate the failure. - if (!reduced_res.value()) { - return reduced_res.value(); - } - // If the optional is not empty and the value is not falsy, it means - // exceptions were thrown but the invariants passed so the test must - // continue to run. - } + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)...) 
+ .Test(); } Factory factory_; Operation operation_; - std::tuple invariants_; + std::tuple contracts_; }; } // namespace exceptions_internal -/* - * Constructs an empty ExceptionSafetyTester. All ExceptionSafetyTester - * objects are immutable and all With[thing] mutation methods return new - * instances of ExceptionSafetyTester. - * - * In order to test a T for exception safety, a factory for that T, a testable - * operation, and at least one invariant callback returning an assertion - * result must be applied using the respective methods. - */ -inline exceptions_internal::ExceptionSafetyTester<> -MakeExceptionSafetyTester() { - return {}; -} - } // namespace testing #endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/absl/base/internal/exception_testing.h b/absl/base/internal/exception_testing.h index fd89a3f6..0cf7918e 100644 --- a/absl/base/internal/exception_testing.h +++ b/absl/base/internal/exception_testing.h @@ -35,7 +35,7 @@ EXPECT_DEATH(expr, ".*") #else #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ - EXPECT_DEATH(expr, text) + EXPECT_DEATH_IF_SUPPORTED(expr, text) #endif diff --git a/absl/base/internal/hide_ptr.h b/absl/base/internal/hide_ptr.h index a2694508..ce390dc4 100644 --- a/absl/base/internal/hide_ptr.h +++ b/absl/base/internal/hide_ptr.h @@ -18,7 +18,7 @@ #include namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // Arbitrary value with high bits set. Xor'ing with it is unlikely @@ -43,7 +43,7 @@ inline T* UnhidePtr(uintptr_t hidden) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_ diff --git a/absl/base/internal/identity.h b/absl/base/internal/identity.h index 2eaab453..d57c83f4 100644 --- a/absl/base/internal/identity.h +++ b/absl/base/internal/identity.h @@ -17,7 +17,7 @@ #define ABSL_BASE_INTERNAL_IDENTITY_H_ namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace internal { template @@ -29,7 +29,7 @@ template using identity_t = typename identity::type; } // namespace internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/absl/base/internal/inline_variable_testing.h b/absl/base/internal/inline_variable_testing.h index 49fa4ade..be0b0b96 100644 --- a/absl/base/internal/inline_variable_testing.h +++ b/absl/base/internal/inline_variable_testing.h @@ -18,7 +18,7 @@ #include "absl/base/internal/inline_variable.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace inline_variable_testing_internal { struct Foo { @@ -40,7 +40,7 @@ const int& get_int_a(); const int& get_int_b(); } // namespace inline_variable_testing_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h index bc05a0a3..1372ef50 100644 --- a/absl/base/internal/invoke.h +++ b/absl/base/internal/invoke.h @@ -43,7 +43,7 @@ // top of this file for the API documentation. namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // The five classes below each implement one of the clauses from the definition @@ -184,7 +184,7 @@ InvokeT Invoke(F&& f, Args&&... 
args) { std::forward(args)...); } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc index ca6239ad..10d805cc 100644 --- a/absl/base/internal/low_level_alloc.cc +++ b/absl/base/internal/low_level_alloc.cc @@ -63,7 +63,7 @@ #endif // __APPLE__ namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // A first-fit allocator with amortized logarithmic free() time. @@ -209,7 +209,7 @@ struct LowLevelAlloc::Arena { int32_t allocation_count GUARDED_BY(mu); // flags passed to NewArena const uint32_t flags; - // Result of getpagesize() + // Result of sysconf(_SC_PAGESIZE) const size_t pagesize; // Lowest power of two >= max(16, sizeof(AllocList)) const size_t roundup; @@ -325,8 +325,10 @@ size_t GetPageSize() { SYSTEM_INFO system_info; GetSystemInfo(&system_info); return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); -#else +#elif defined(__wasm__) || defined(__asmjs__) return getpagesize(); +#else + return sysconf(_SC_PAGESIZE); #endif } @@ -402,16 +404,20 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) { ABSL_RAW_CHECK(munmap_result != 0, "LowLevelAlloc::DeleteArena: VitualFree failed"); #else +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { munmap_result = munmap(region, size); } else { munmap_result = base_internal::DirectMunmap(region, size); } +#else + munmap_result = munmap(region, size); +#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (munmap_result != 0) { ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", errno); } -#endif +#endif // _WIN32 } section.Leave(); arena->~Arena(); @@ -546,6 +552,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed"); #else +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { new_pages = base_internal::DirectMmap(nullptr, new_pages_size, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); @@ -553,10 +560,15 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); } +#else + new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (new_pages == MAP_FAILED) { ABSL_RAW_LOG(FATAL, "mmap error: %d", errno); } -#endif + +#endif // _WIN32 arena->mu.Lock(); s = reinterpret_cast(new_pages); s->header.size = new_pages_size; @@ -600,7 +612,7 @@ void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_LOW_LEVEL_ALLOC_MISSING diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h index 3f289571..87cfc934 100644 --- a/absl/base/internal/low_level_alloc.h +++ b/absl/base/internal/low_level_alloc.h @@ -39,10 +39,13 @@ #define ABSL_LOW_LEVEL_ALLOC_MISSING 1 #endif -// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows. 
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or +// asm.js / WebAssembly. +// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html +// for more information. #ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING #error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set -#elif defined(_WIN32) +#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) #define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1 #endif @@ -51,7 +54,7 @@ #include "absl/base/port.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { class LowLevelAlloc { @@ -116,6 +119,6 @@ class LowLevelAlloc { }; } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc index 15ffe298..65bb519d 100644 --- a/absl/base/internal/low_level_alloc_test.cc +++ b/absl/base/internal/low_level_alloc_test.cc @@ -22,7 +22,7 @@ #include namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { namespace { @@ -149,7 +149,7 @@ static struct BeforeMain { } // namespace } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl int main(int argc, char *argv[]) { diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h index 2ae464b4..7cb6117e 100644 --- a/absl/base/internal/low_level_scheduling.h +++ b/absl/base/internal/low_level_scheduling.h @@ -28,7 +28,7 @@ extern "C" bool __google_disable_rescheduling(void); extern "C" void __google_enable_rescheduling(bool disable_result); namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { class SchedulingHelper; // To allow use of SchedulingGuard. @@ -101,6 +101,6 @@ inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc index cd1bbc09..ed8b8d7c 100644 --- a/absl/base/internal/raw_logging.cc +++ b/absl/base/internal/raw_logging.cc @@ -139,7 +139,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, #endif #ifdef ABSL_MIN_LOG_LEVEL - if (static_cast(severity) < ABSL_MIN_LOG_LEVEL && + if (severity < static_cast(ABSL_MIN_LOG_LEVEL) && severity < absl::LogSeverity::kFatal) { enabled = false; } @@ -181,7 +181,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } // namespace namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace raw_logging_internal { void SafeWriteToStderr(const char *s, size_t len) { #if defined(ABSL_HAVE_SYSCALL_WRITE) @@ -207,6 +207,15 @@ void RawLog(absl::LogSeverity severity, const char* file, int line, va_end(ap); } +// Non-formatting version of RawLog(). +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. 
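The ABSL_MIN_LOG_LEVEL fix in the raw_logging.cc hunk above moves the cast onto the macro rather than the severity, so the comparison happens in the LogSeverity domain and kFatal is never suppressed. A minimal sketch of the corrected predicate (RawLogIsEnabled is an illustrative name, not part of the patch):

#include "absl/base/log_severity.h"

// Sketch: the integer ABSL_MIN_LOG_LEVEL macro is cast into the
// LogSeverity domain; messages below the threshold are dropped, but
// kFatal always stays enabled.
inline bool RawLogIsEnabled(absl::LogSeverity severity) {
#ifdef ABSL_MIN_LOG_LEVEL
  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
      severity < absl::LogSeverity::kFatal) {
    return false;
  }
#endif
  return true;
}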
+static void DefaultInternalLog(absl::LogSeverity severity, const char* file, + int line, const std::string& message) { + RawLog(severity, file, line, "%s", message.c_str()); +} + bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; @@ -215,6 +224,13 @@ bool RawLoggingFullySupported() { #endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED } +ABSL_CONST_INIT absl::base_internal::AtomicHook + internal_log_function(DefaultInternalLog); + +void RegisterInternalLogFunction(InternalLogFunction func) { + internal_log_function.Store(func); +} + } // namespace raw_logging_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h index 04ff0607..2786a3de 100644 --- a/absl/base/internal/raw_logging.h +++ b/absl/base/internal/raw_logging.h @@ -19,7 +19,10 @@ #ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ #define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ +#include + #include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook.h" #include "absl/base/log_severity.h" #include "absl/base/macros.h" #include "absl/base/port.h" @@ -57,6 +60,34 @@ } \ } while (0) +// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, +// except that if the richer log library is linked into the binary, we dispatch +// to that instead. This is potentially useful for internal logging and +// assertions, where we are using RAW_LOG neither for its async-signal-safety +// nor for its non-allocating nature, but rather because raw logging has very +// few other dependencies. +// +// The API is a subset of the above: each macro only takes two arguments. Use +// StrCat if you need to build a richer message. +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_logging_internal_basename = \ + ::absl::raw_logging_internal::Basename(__FILE__, \ + sizeof(__FILE__) - 1); \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_basename, __LINE__, message); \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) + #define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo #define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning #define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError @@ -65,7 +96,7 @@ ::absl::NormalizeLogSeverity(severity) namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace raw_logging_internal { // Helper function to implement ABSL_RAW_LOG @@ -84,7 +115,7 @@ void SafeWriteToStderr(const char *s, size_t len); // compile-time function to get the "base" filename, that is, the part of // a filename after the last "/" or "\" path separator. The search starts at -// the end of the std::string; the second parameter is the length of the std::string. +// the end of the string; the second parameter is the length of the string. constexpr const char* Basename(const char* fname, int offset) { return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' ? 
fname + offset @@ -132,8 +163,20 @@ using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); +// Internal logging function for ABSL_INTERNAL_LOG to dispatch to. +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. +using InternalLogFunction = void (*)(absl::LogSeverity severity, + const char* file, int line, + const std::string& message); + +extern base_internal::AtomicHook internal_log_function; + +void RegisterInternalLogFunction(InternalLogFunction func); + } // namespace raw_logging_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ diff --git a/absl/base/internal/scheduling_mode.h b/absl/base/internal/scheduling_mode.h index dd7df6bb..19a7514c 100644 --- a/absl/base/internal/scheduling_mode.h +++ b/absl/base/internal/scheduling_mode.h @@ -19,7 +19,7 @@ #define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // Used to describe how a thread may be scheduled. Typically associated with @@ -50,7 +50,7 @@ enum SchedulingMode { }; } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc index 9d90b3cb..8f8eef82 100644 --- a/absl/base/internal/spinlock.cc +++ b/absl/base/internal/spinlock.cc @@ -54,7 +54,7 @@ // holder to acquire the lock. There may be outstanding waiter(s). namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { ABSL_CONST_INIT static base_internal::AtomicHook 0); - uint32_t spin_loop_wait_cycles = - EncodeWaitCycles(initial_wait_timestamp, CycleClock::Now()); - *wait_cycles = spin_loop_wait_cycles; - - return TryLockInternal(lock_value, spin_loop_wait_cycles); + return lock_value; } void SpinLock::SlowLock() { + uint32_t lock_value = SpinLoop(); + lock_value = TryLockInternal(lock_value, 0); + if ((lock_value & kSpinLockHeld) == 0) { + return; + } // The lock was not obtained initially, so this thread needs to wait for // it. Record the current timestamp in the local variable wait_start_time // so the total wait time can be stored in the lockword once this thread // obtains the lock. int64_t wait_start_time = CycleClock::Now(); - uint32_t wait_cycles; - uint32_t lock_value = SpinLoop(wait_start_time, &wait_cycles); - + uint32_t wait_cycles = 0; int lock_wait_call_count = 0; while ((lock_value & kSpinLockHeld) != 0) { // If the lock is currently held, but not marked as having a sleeper, mark @@ -142,7 +137,7 @@ void SpinLock::SlowLock() { // owner to think it experienced contention. if (lockword_.compare_exchange_strong( lock_value, lock_value | kSpinLockSleeper, - std::memory_order_acquire, std::memory_order_relaxed)) { + std::memory_order_relaxed, std::memory_order_relaxed)) { // Successfully transitioned to kSpinLockSleeper. Pass // kSpinLockSleeper to the SpinLockWait routine to properly indicate // the last lock_value observed. 
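Taken together, the raw_logging changes above add an AtomicHook so that a richer logging library, when linked in, can capture ABSL_INTERNAL_LOG output instead of the RawLog() default. A usage sketch (MyLogSink and InitLogging are illustrative names):

#include <string>

#include "absl/base/internal/raw_logging.h"
#include "absl/strings/str_cat.h"

// Matches the InternalLogFunction signature introduced above.
void MyLogSink(absl::LogSeverity severity, const char* file, int line,
               const std::string& message) {
  // Forward to a full-featured logging backend here.
}

void InitLogging() {
  // After registration, ABSL_INTERNAL_LOG dispatches through MyLogSink.
  absl::raw_logging_internal::RegisterInternalLogFunction(MyLogSink);

  // Each macro takes exactly two arguments; build richer messages with StrCat.
  ABSL_INTERNAL_LOG(INFO, absl::StrCat("initialized at severity ", 0));
  ABSL_INTERNAL_CHECK(true, "must hold");
}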
@@ -171,7 +166,9 @@ void SpinLock::SlowLock() { ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); // Spin again after returning from the wait routine to give this thread // some chance of obtaining the lock. - lock_value = SpinLoop(wait_start_time, &wait_cycles); + lock_value = SpinLoop(); + wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now()); + lock_value = TryLockInternal(lock_value, wait_cycles); } } @@ -207,14 +204,20 @@ uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time, (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT; // Return a representation of the time spent waiting that can be stored in - // the lock word's upper bits. bit_cast is required as Atomic32 is signed. - const uint32_t clamped = static_cast( + // the lock word's upper bits. + uint32_t clamped = static_cast( std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT); - // bump up value if necessary to avoid returning kSpinLockSleeper. - const uint32_t after_spinlock_sleeper = - kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT); - return clamped == kSpinLockSleeper ? after_spinlock_sleeper : clamped; + if (clamped == 0) { + return kSpinLockSleeper; // Just wake waiters, but don't record contention. + } + // Bump up value if necessary to avoid returning kSpinLockSleeper. + const uint32_t kMinWaitTime = + kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT); + if (clamped == kSpinLockSleeper) { + return kMinWaitTime; + } + return clamped; } uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { @@ -226,5 +229,5 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h index 7b59fe26..d53878b2 100644 --- a/absl/base/internal/spinlock.h +++ b/absl/base/internal/spinlock.h @@ -45,7 +45,7 @@ #include "absl/base/thread_annotations.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { class LOCKABLE SpinLock { @@ -102,8 +102,8 @@ class LOCKABLE SpinLock { inline void Unlock() UNLOCK_FUNCTION() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); uint32_t lock_value = lockword_.load(std::memory_order_relaxed); - lockword_.store(lock_value & kSpinLockCooperative, - std::memory_order_release); + lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, + std::memory_order_release); if ((lock_value & kSpinLockDisabledScheduling) != 0) { base_internal::SchedulingGuard::EnableRescheduling(true); @@ -162,7 +162,7 @@ class LOCKABLE SpinLock { void InitLinkerInitializedAndCooperative(); void SlowLock() ABSL_ATTRIBUTE_COLD; void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; - uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles); + uint32_t SpinLoop(); inline bool TryLockImpl() { uint32_t lock_value = lockword_.load(std::memory_order_relaxed); @@ -235,7 +235,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ diff --git a/absl/base/internal/spinlock_benchmark.cc b/absl/base/internal/spinlock_benchmark.cc new file mode 100644 index 00000000..907d3e27 --- /dev/null +++ b/absl/base/internal/spinlock_benchmark.cc @@ -0,0 +1,52 @@ +// Copyright 2018 The Abseil Authors. 
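One subtlety in the spinlock.h hunk above deserves a note: Unlock() switches from a blind release store to an exchange(). Between the relaxed load and the release, a contending thread may have stored sleeper/wait-cycle bits into the lock word; exchange() returns the final word so that payload still reaches SlowUnlock(). A self-contained sketch (the bit constants mirror SpinLock's lock word but are assumptions of this example):

#include <atomic>
#include <cstdint>

constexpr uint32_t kHeld = 1, kCooperative = 2;  // assumed layout

void UnlockSketch(std::atomic<uint32_t>& lockword) {
  uint32_t prev = lockword.load(std::memory_order_relaxed);
  // exchange() observes bits stored by waiters after the load above,
  // which a plain store using the stale 'prev' would silently drop.
  prev = lockword.exchange(prev & kCooperative, std::memory_order_release);
  if ((prev & ~(kHeld | kCooperative)) != 0) {
    // Slow path: wake waiters and report the recorded contention.
  }
}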
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock +// and Mutex performance under varying levels of contention. + +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "benchmark/benchmark.h" + +namespace { + +template +static void BM_SpinLock(benchmark::State& state) { + // Ensure a ThreadIdentity is installed. + ABSL_INTERNAL_CHECK( + absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() != + nullptr, + "GetOrCreateCurrentThreadIdentity() failed"); + + static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode); + for (auto _ : state) { + absl::base_internal::SpinLockHolder holder(spinlock); + } +} + +BENCHMARK_TEMPLATE(BM_SpinLock, + absl::base_internal::SCHEDULE_KERNEL_ONLY) + ->UseRealTime() + ->Threads(1) + ->ThreadPerCpu(); + +BENCHMARK_TEMPLATE(BM_SpinLock, + absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) + ->UseRealTime() + ->Threads(1) + ->ThreadPerCpu(); + +} // namespace diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc new file mode 100644 index 00000000..94c861dc --- /dev/null +++ b/absl/base/internal/spinlock_linux.inc @@ -0,0 +1,72 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Linux-specific part of spinlock_wait.cc + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" + +// The SpinLock lockword is `std::atomic`. Here we assert that +// `std::atomic` is bitwise equivalent of the `int` expected +// by SYS_futex. We also assume that reads/writes done to the lockword +// by SYS_futex have rational semantics with regard to the +// std::atomic<> API. C++ provides no guarantees of these assumptions, +// but they are believed to hold in practice. +static_assert(sizeof(std::atomic) == sizeof(int), + "SpinLock lockword has the wrong size for a futex"); + +// Some Android headers are missing these definitions even though they +// support these futex operations. 
+#ifdef __BIONIC__ +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#ifndef FUTEX_PRIVATE_FLAG +#define FUTEX_PRIVATE_FLAG 128 +#endif +#endif + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode) { + if (loop != 0) { + int save_errno = errno; + struct timespec tm; + tm.tv_sec = 0; + // Increase the delay; we expect (but do not rely on) explicit wakeups. + // We don't rely on explicit wakeups because we intentionally allow for + // a race on the kSpinLockSleeper bit. + tm.tv_nsec = 16 * absl::base_internal::SpinLockSuggestedDelayNS(loop); + syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); + errno = save_errno; + } +} + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, + bool all) { + syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0); +} + +} // extern "C" diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc index 6709d01e..3f8e43f0 100644 --- a/absl/base/internal/spinlock_wait.cc +++ b/absl/base/internal/spinlock_wait.cc @@ -13,7 +13,7 @@ // limitations under the License. // The OS-specific header included below must provide two calls: -// base::subtle::SpinLockDelay() and base::subtle::SpinLockWake(). +// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake(). // See spinlock_wait.h for the specs. #include <atomic> @@ -23,6 +23,8 @@ #if defined(_WIN32) #include "absl/base/internal/spinlock_win32.inc" +#elif defined(__linux__) +#include "absl/base/internal/spinlock_linux.inc" #elif defined(__akaros__) #include "absl/base/internal/spinlock_akaros.inc" #else @@ -30,21 +32,22 @@ #endif namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // See spinlock_wait.h for spec.
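As a concrete illustration of that spec: SpinLockWait() retries a table of {from, to, done} transitions, sleeping through the futex-backed delay above (on Linux) whenever no transition matches. A hedged usage sketch; the state names and values are invented for the example:

#include <atomic>
#include <cstdint>

#include "absl/base/internal/spinlock_wait.h"

enum : uint32_t { kIdle = 0, kBusy = 1 };  // assumed states

// Blocks until the word can be moved from kIdle to kBusy;
// 'true' marks the transition as terminal.
uint32_t AcquireBusy(std::atomic<uint32_t>* word) {
  static constexpr absl::base_internal::SpinLockWaitTransition trans[] = {
      {kIdle, kBusy, true},
  };
  return absl::base_internal::SpinLockWait(
      word, 1, trans, absl::base_internal::SCHEDULE_KERNEL_ONLY);
}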
uint32_t SpinLockWait(std::atomic *w, int n, const SpinLockWaitTransition trans[], base_internal::SchedulingMode scheduling_mode) { - for (int loop = 0; ; loop++) { + int loop = 0; + for (;;) { uint32_t v = w->load(std::memory_order_acquire); int i; for (i = 0; i != n && v != trans[i].from; i++) { } if (i == n) { - SpinLockDelay(w, v, loop, scheduling_mode); // no matching transition - } else if (trans[i].to == v || // null transition + SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition + } else if (trans[i].to == v || // null transition w->compare_exchange_strong(v, trans[i].to, std::memory_order_acquire, std::memory_order_relaxed)) { @@ -77,5 +80,5 @@ int SpinLockSuggestedDelayNS(int loop) { } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h index 31aaa8c6..5eb727f1 100644 --- a/absl/base/internal/spinlock_wait.h +++ b/absl/base/internal/spinlock_wait.h @@ -24,7 +24,7 @@ #include "absl/base/internal/scheduling_mode.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // SpinLockWait() waits until it can perform one of several transitions from @@ -63,7 +63,7 @@ void SpinLockDelay(std::atomic *w, uint32_t value, int loop, int SpinLockSuggestedDelayNS(int loop); } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl // In some build configurations we pass --detect-odr-violations to the diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc index 7db2e001..ce14fc0f 100644 --- a/absl/base/internal/sysinfo.cc +++ b/absl/base/internal/sysinfo.cc @@ -56,7 +56,7 @@ #include "absl/base/internal/unscaledcycleclock.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { static once_flag init_system_info_once; @@ -402,5 +402,5 @@ pid_t GetTID() { #endif } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h index 18aa2e29..79100f61 100644 --- a/absl/base/internal/sysinfo.h +++ b/absl/base/internal/sysinfo.h @@ -33,7 +33,7 @@ #include "absl/base/port.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // Nominal core processor cycles per second of each processor. 
This is _not_ @@ -59,7 +59,7 @@ using pid_t = DWORD; pid_t GetTID(); } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_SYSINFO_H_ diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc index fdbbdf88..c072ebc2 100644 --- a/absl/base/internal/sysinfo_test.cc +++ b/absl/base/internal/sysinfo_test.cc @@ -28,7 +28,7 @@ #include "absl/synchronization/mutex.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { namespace { @@ -96,5 +96,5 @@ TEST(SysinfoTest, LinuxGetTID) { } // namespace } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc index aa1add8b..b35bee34 100644 --- a/absl/base/internal/thread_identity.cc +++ b/absl/base/internal/thread_identity.cc @@ -28,7 +28,7 @@ #include "absl/base/internal/spinlock.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { #if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11 @@ -69,6 +69,14 @@ void SetCurrentThreadIdentity( // NOTE: Not async-safe. But can be open-coded. absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, reclaimer); + +#ifdef __EMSCRIPTEN__ + // Emscripten PThread implementation does not support signals. + // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html + // for more information. + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast(identity)); +#else // We must mask signals around the call to setspecific as with current glibc, // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent()) // may zero our value. @@ -82,6 +90,8 @@ void SetCurrentThreadIdentity( pthread_setspecific(thread_identity_pthread_key, reinterpret_cast(identity)); pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr); +#endif // !__EMSCRIPTEN__ + #elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS // NOTE: Not async-safe. But can be open-coded. 
absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, @@ -121,5 +131,5 @@ ThreadIdentity* CurrentThreadIdentityIfPresent() { #endif } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h index 18a5a750..17ac2a7c 100644 --- a/absl/base/internal/thread_identity.h +++ b/absl/base/internal/thread_identity.h @@ -33,7 +33,7 @@ #include "absl/base/internal/per_thread_tls.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { struct SynchLocksHeld; struct SynchWaitParams; @@ -237,6 +237,6 @@ inline ThreadIdentity* CurrentThreadIdentityIfPresent() { #endif } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc index f39f11d2..ec93fc27 100644 --- a/absl/base/internal/thread_identity_test.cc +++ b/absl/base/internal/thread_identity_test.cc @@ -25,7 +25,7 @@ #include "absl/synchronization/mutex.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { namespace { @@ -124,5 +124,5 @@ TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { } // namespace } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/throw_delegate.cc b/absl/base/internal/throw_delegate.cc index 5466e0f3..0f73c3eb 100644 --- a/absl/base/internal/throw_delegate.cc +++ b/absl/base/internal/throw_delegate.cc @@ -22,7 +22,7 @@ #include "absl/base/internal/raw_logging.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { namespace { @@ -31,8 +31,8 @@ template #ifdef ABSL_HAVE_EXCEPTIONS throw error; #else - ABSL_RAW_LOG(ERROR, "%s", error.what()); - abort(); + ABSL_RAW_LOG(FATAL, "%s", error.what()); + std::abort(); #endif } } // namespace @@ -104,5 +104,5 @@ void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/internal/throw_delegate.h b/absl/base/internal/throw_delegate.h index d34ff79c..7e5510c0 100644 --- a/absl/base/internal/throw_delegate.h +++ b/absl/base/internal/throw_delegate.h @@ -20,7 +20,7 @@ #include namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // Helper functions that allow throwing exceptions consistently from anywhere. 
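A short sketch of what these delegates are for (BoundedGet is an illustrative helper): callers get one consistent failure behavior whether or not exceptions are enabled, and with the throw_delegate.cc change above the no-exceptions path now fails fast via a FATAL log and std::abort() instead of merely logging at ERROR.

#include <cstddef>

#include "absl/base/internal/throw_delegate.h"

int BoundedGet(const int* data, size_t size, size_t i) {
  // Throws std::out_of_range when exceptions are on; aborts otherwise.
  if (i >= size) {
    absl::base_internal::ThrowStdOutOfRange("BoundedGet: index too big");
  }
  return data[i];
}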
@@ -67,7 +67,7 @@ namespace base_internal { // [[noreturn]] void ThrowStdBadArrayNewLength(); } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h index f20a8694..07a64bba 100644 --- a/absl/base/internal/unaligned_access.h +++ b/absl/base/internal/unaligned_access.h @@ -65,7 +65,8 @@ void __sanitizer_unaligned_store64(void *p, uint64_t v); } // extern "C" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { +namespace base_internal { inline uint16_t UnalignedLoad16(const void *p) { return __sanitizer_unaligned_load16(p); @@ -91,19 +92,71 @@ inline void UnalignedStore64(void *p, uint64_t v) { __sanitizer_unaligned_store64(p, v); } -} // inline namespace lts_2018_06_20 +} // namespace base_internal +} // inline namespace lts_2018_12_18 } // namespace absl -#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#elif defined(UNDEFINED_BEHAVIOR_SANITIZER) + +namespace absl { +inline namespace lts_2018_12_18 { +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(const void *p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace base_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ - (absl::UnalignedStore16(_p, _val)) + (absl::base_internal::UnalignedStore16(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ - (absl::UnalignedStore32(_p, _val)) + (absl::base_internal::UnalignedStore32(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ - (absl::UnalignedStore64(_p, _val)) + (absl::base_internal::UnalignedStore64(_p, _val)) #elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \ defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) || \ @@ -160,8 +213,8 @@ inline void UnalignedStore64(void *p, uint64_t v) { // so we do that. 
namespace absl { -inline namespace lts_2018_06_20 { -namespace internal { +inline namespace lts_2018_12_18 { +namespace base_internal { struct Unaligned16Struct { uint16_t value; @@ -173,24 +226,27 @@ struct Unaligned32Struct { uint8_t dummy; // To make the size non-power-of-two. } ABSL_ATTRIBUTE_PACKED; -} // namespace internal -} // inline namespace lts_2018_06_20 +} // namespace base_internal +} // inline namespace lts_2018_12_18 } // namespace absl -#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ - ((reinterpret_cast(_p))->value) -#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ - ((reinterpret_cast(_p))->value) +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + ((reinterpret_cast(_p)) \ + ->value) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + ((reinterpret_cast(_p)) \ + ->value) -#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ - ((reinterpret_cast< ::absl::internal::Unaligned16Struct *>(_p))->value = \ - (_val)) -#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ - ((reinterpret_cast< ::absl::internal::Unaligned32Struct *>(_p))->value = \ - (_val)) +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + ((reinterpret_cast< ::absl::base_internal::Unaligned16Struct *>(_p)) \ + ->value = (_val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + ((reinterpret_cast< ::absl::base_internal::Unaligned32Struct *>(_p)) \ + ->value = (_val)) namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { +namespace base_internal { inline uint64_t UnalignedLoad64(const void *p) { uint64_t t; @@ -200,12 +256,14 @@ inline uint64_t UnalignedLoad64(const void *p) { inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } -} // inline namespace lts_2018_06_20 +} // namespace base_internal +} // inline namespace lts_2018_12_18 } // namespace absl -#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ - (absl::UnalignedStore64(_p, _val)) + (absl::base_internal::UnalignedStore64(_p, _val)) #else @@ -217,7 +275,8 @@ inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } // unaligned loads and stores. 
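The generic fallback that follows relies on the standard memcpy idiom; isolated below with an assumed helper name. Copying through a local temporary is well-defined for any source alignment, and optimizing compilers lower it to a single load on targets that permit unaligned access, so nothing is lost relative to the pointer-cast approach.

#include <cstdint>
#include <cstring>

inline uint32_t LoadU32(const void* p) {  // illustrative name
  uint32_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}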
namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { +namespace base_internal { inline uint16_t UnalignedLoad16(const void *p) { uint16_t t; @@ -243,19 +302,23 @@ inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } -} // inline namespace lts_2018_06_20 +} // namespace base_internal +} // inline namespace lts_2018_12_18 } // namespace absl -#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ - (absl::UnalignedStore16(_p, _val)) + (absl::base_internal::UnalignedStore16(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ - (absl::UnalignedStore32(_p, _val)) + (absl::base_internal::UnalignedStore32(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ - (absl::UnalignedStore64(_p, _val)) + (absl::base_internal::UnalignedStore64(_p, _val)) #endif diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc index e0798eb9..888caf1e 100644 --- a/absl/base/internal/unscaledcycleclock.cc +++ b/absl/base/internal/unscaledcycleclock.cc @@ -27,7 +27,7 @@ #include "absl/base/internal/sysinfo.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { #if defined(__i386__) @@ -97,7 +97,7 @@ double UnscaledCycleClock::Frequency() { #endif } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h index 9da14d0b..c71674f3 100644 --- a/absl/base/internal/unscaledcycleclock.h +++ b/absl/base/internal/unscaledcycleclock.h @@ -84,7 +84,7 @@ #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #endif namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace time_internal { class UnscaledCycleClockWrapperForGetCurrentTime; } // namespace time_internal @@ -114,7 +114,7 @@ class UnscaledCycleClock { }; } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/absl/base/invoke_test.cc b/absl/base/invoke_test.cc index 2c04b591..4df637ac 100644 --- a/absl/base/invoke_test.cc +++ b/absl/base/invoke_test.cc @@ -25,7 +25,7 @@ #include "absl/strings/str_cat.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { namespace { @@ -198,5 +198,5 @@ TEST(InvokeTest, SfinaeFriendly) { } // namespace } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/log_severity.h b/absl/base/log_severity.h index 8b7a66ba..c24fad79 100644 --- a/absl/base/log_severity.h +++ b/absl/base/log_severity.h @@ -21,7 +21,7 @@ #include "absl/base/attributes.h" namespace absl { -inline namespace 
lts_2018_06_20 { +inline namespace lts_2018_12_18 { // Four severity levels are defined. Logging APIs should terminate the program // when a message is logged at severity `kFatal`; the other levels have no // special semantics. @@ -40,7 +40,7 @@ constexpr std::array<absl::LogSeverity, 4> LogSeverities() { absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; } -// Returns the all-caps std::string representation (e.g. "INFO") of the specified +// Returns the all-caps string representation (e.g. "INFO") of the specified // severity level if it is one of the normal levels and "UNKNOWN" otherwise. constexpr const char* LogSeverityName(absl::LogSeverity s) { return s == absl::LogSeverity::kInfo @@ -63,7 +63,7 @@ constexpr absl::LogSeverity NormalizeLogSeverity(int s) { return NormalizeLogSeverity(static_cast<absl::LogSeverity>(s)); } -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ diff --git a/absl/base/macros.h b/absl/base/macros.h index aabe8db4..14c4b0a3 100644 --- a/absl/base/macros.h +++ b/absl/base/macros.h @@ -43,14 +43,14 @@ (sizeof(::absl::macros_internal::ArraySizeHelper(array))) namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace macros_internal { // Note: this internal template function declaration is used by ABSL_ARRAYSIZE. // The function doesn't need a definition, as we only use its type. template <typename T, size_t N> auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; } // namespace macros_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl // kLinkerInitialized @@ -74,13 +74,13 @@ auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; // // Invocation // static MyClass my_global(absl::base_internal::kLinkerInitialized); namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { enum LinkerInitialized { kLinkerInitialized = 0, }; } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl // ABSL_FALLTHROUGH_INTENDED @@ -203,4 +203,14 @@ enum LinkerInitialized { : [] { assert(false && #expr); }()) // NOLINT #endif +#ifdef ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY try +#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false) +#else // ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY if (true) +#define ABSL_INTERNAL_CATCH_ANY else if (false) +#define ABSL_INTERNAL_RETHROW do {} while (false) +#endif // ABSL_HAVE_EXCEPTIONS + #endif // ABSL_BASE_MACROS_H_ diff --git a/absl/base/raw_logging_test.cc b/absl/base/raw_logging_test.cc index dae4b351..b21cf651 100644 --- a/absl/base/raw_logging_test.cc +++ b/absl/base/raw_logging_test.cc @@ -18,12 +18,20 @@ #include "absl/base/internal/raw_logging.h" +#include + #include "gtest/gtest.h" +#include "absl/strings/str_cat.h" namespace { TEST(RawLoggingCompilationTest, Log) { ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5); + ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1); ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); } @@ -32,7 +40,7 @@ TEST(RawLoggingCompilationTest, PassingCheck) { } // Not all platforms support output from raw log, so we don't verify any -// particular output for RAW check failures (expecting the empty std::string +// particular output for RAW check failures (expecting the empty string // accomplishes this). This test is primarily a compilation test, but we // are verifying process death when EXPECT_DEATH works for a platform. const char kExpectedDeathOutput[] = ""; @@ -47,4 +55,25 @@ TEST(RawLoggingDeathTest, LogFatal) { kExpectedDeathOutput); } +TEST(InternalLog, CompilationTest) { + ABSL_INTERNAL_LOG(INFO, "Internal Log"); + std::string log_msg = "Internal Log"; + ABSL_INTERNAL_LOG(INFO, log_msg); + + ABSL_INTERNAL_LOG(INFO, log_msg + " 2"); + + float d = 1.1f; + ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d)); +} + +TEST(InternalLogDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(InternalLogDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + } // namespace diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc index d04ee366..95382977 100644 --- a/absl/base/spinlock_test_common.cc +++ b/absl/base/spinlock_test_common.cc @@ -36,7 +36,7 @@ constexpr int32_t kNumThreads = 10; constexpr int32_t kIters = 1000; namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { namespace base_internal { // This is defined outside of anonymous namespace so that it can be @@ -156,7 +156,8 @@ TEST(SpinLock, WaitCyclesEncoding) { // Test corner cases int64_t start_time = time_distribution(generator); - EXPECT_EQ(0, SpinLockTest::EncodeWaitCycles(start_time, start_time)); + EXPECT_EQ(kSpinLockSleeper, + SpinLockTest::EncodeWaitCycles(start_time, start_time)); EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0)); EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask)); EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask, @@ -264,5 +265,5 @@ TEST(SpinLockWithThreads, DoesNotDeadlock) { } // namespace } // namespace base_internal -} // inline namespace lts_2018_06_20 +} // inline namespace lts_2018_12_18 } // namespace absl diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h index 8d30b932..2241ace4 100644 --- a/absl/base/thread_annotations.h +++ b/absl/base/thread_annotations.h @@ -31,7 +31,6 @@ // that evaluate to a concrete mutex object whenever 
possible. If the mutex // you want to refer to is not in scope, you may use a member pointer // (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. -// #ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_THREAD_ANNOTATIONS_H_ @@ -109,13 +108,23 @@ // The mutex is expected to be held both on entry to, and exit from, the // function. // +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// SHARED_LOCKS_REQUIRED. +// // Example: // // Mutex mu1, mu2; // int a GUARDED_BY(mu1); // int b GUARDED_BY(mu2); // -// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }; +// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } #define EXCLUSIVE_LOCKS_REQUIRED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) diff --git a/absl/compiler_config_setting.bzl b/absl/compiler_config_setting.bzl new file mode 100644 index 00000000..b77c4f56 --- /dev/null +++ b/absl/compiler_config_setting.bzl @@ -0,0 +1,39 @@ +# +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Creates config_setting that allows selecting based on 'compiler' value.""" + +def create_llvm_config(name, visibility): + # The "do_not_use_tools_cpp_compiler_present" attribute exists to + # distinguish between older versions of Bazel that do not support + # "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do. + # In the future, the only way to select on the compiler will be through + # flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can + # be removed. 
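Returning to the thread_annotations.h guidance added above (const readers take shared locks, mutating methods take exclusive locks), a minimal annotated class makes the pattern concrete; Counter is invented for this example:

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  // Non-const: the caller must hold mu_ exclusively.
  void Increment() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }
  // Const: a shared (reader) hold on mu_ suffices.
  int Get() const SHARED_LOCKS_REQUIRED(mu_) { return value_; }

  absl::Mutex mu_;

 private:
  int value_ GUARDED_BY(mu_) = 0;
};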
+ if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"): + native.config_setting( + name = name, + flag_values = { + "@bazel_tools//tools/cpp:compiler": "llvm", + }, + visibility = visibility, + ) + else: + native.config_setting( + name = name, + values = {"compiler": "llvm"}, + visibility = visibility, + ) diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 119d5c88..afc869f4 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -19,17 +19,38 @@ load( "ABSL_DEFAULT_COPTS", "ABSL_TEST_COPTS", "ABSL_EXCEPTIONS_FLAG", + "ABSL_EXCEPTIONS_FLAG_LINKOPTS", ) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) # Apache 2.0 +cc_library( + name = "compressed_tuple", + hdrs = ["internal/compressed_tuple.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/utility", + ], +) + +cc_test( + name = "compressed_tuple_test", + srcs = ["internal/compressed_tuple_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":compressed_tuple", + "@com_google_googletest//:gtest_main", + ], +) + cc_library( name = "fixed_array", hdrs = ["fixed_array.h"], copts = ABSL_DEFAULT_COPTS, deps = [ + ":compressed_tuple", "//absl/algorithm", "//absl/base:core_headers", "//absl/base:dynamic_annotations", @@ -42,9 +63,11 @@ cc_test( name = "fixed_array_test", srcs = ["fixed_array_test.cc"], copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, deps = [ ":fixed_array", "//absl/base:exception_testing", + "//absl/hash:hash_testing", "//absl/memory", "@com_google_googletest//:gtest_main", ], @@ -57,11 +80,24 @@ cc_test( deps = [ ":fixed_array", "//absl/base:exception_testing", + "//absl/hash:hash_testing", "//absl/memory", "@com_google_googletest//:gtest_main", ], ) +cc_test( + name = "fixed_array_exception_safety_test", + srcs = ["fixed_array_exception_safety_test.cc"], + copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, + deps = [ + ":fixed_array", + "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest_main", + ], +) + cc_test( name = "fixed_array_benchmark", srcs = ["fixed_array_benchmark.cc"], @@ -89,12 +125,14 @@ cc_test( name = "inlined_vector_test", srcs = ["inlined_vector_test.cc"], copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS, deps = [ ":inlined_vector", ":test_instance_tracker", "//absl/base", "//absl/base:core_headers", "//absl/base:exception_testing", + "//absl/hash:hash_testing", "//absl/memory", "//absl/strings", "@com_google_googletest//:gtest_main", @@ -111,6 +149,7 @@ cc_test( "//absl/base", "//absl/base:core_headers", "//absl/base:exception_testing", + "//absl/hash:hash_testing", "//absl/memory", "//absl/strings", "@com_google_googletest//:gtest_main", @@ -150,3 +189,462 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) + +NOTEST_TAGS_NONMOBILE = [ + "no_test_darwin_x86_64", + "no_test_loonix", +] + +NOTEST_TAGS_MOBILE = [ + "no_test_android_arm", + "no_test_android_arm64", + "no_test_android_x86", + "no_test_ios_x86_64", +] + +NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE + +cc_library( + name = "flat_hash_map", + hdrs = ["flat_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_map_test", + srcs = ["flat_hash_map_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + 
deps = [ + ":flat_hash_map", + ":hash_generator_testing", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "//absl/types:any", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "flat_hash_set", + hdrs = ["flat_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_set_test", + srcs = ["flat_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":flat_hash_set", + ":hash_generator_testing", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_map", + hdrs = ["node_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_map_test", + srcs = ["node_hash_map_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":hash_generator_testing", + ":node_hash_map", + ":tracked", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_set", + hdrs = ["node_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_set_test", + srcs = ["node_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":hash_generator_testing", + ":node_hash_set", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "container_memory", + hdrs = ["internal/container_memory.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/memory", + "//absl/utility", + ], +) + +cc_test( + name = "container_memory_test", + srcs = ["internal/container_memory_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":container_memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_function_defaults", + hdrs = ["internal/hash_function_defaults.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base:config", + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_function_defaults_test", + srcs = ["internal/hash_function_defaults_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS, + deps = [ + ":hash_function_defaults", + "//absl/hash", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_generator_testing", + testonly = 1, + srcs = ["internal/hash_generator_testing.cc"], + hdrs = ["internal/hash_generator_testing.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_testing", + "//absl/meta:type_traits", + "//absl/strings", + ], +) + +cc_library( + name = "hash_policy_testing", + testonly = 1, + hdrs = ["internal/hash_policy_testing.h"], + copts = ABSL_TEST_COPTS, + deps = [ + 
"//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_policy_testing_test", + srcs = ["internal/hash_policy_testing_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_testing", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_policy_traits", + hdrs = ["internal/hash_policy_traits.h"], + copts = ABSL_DEFAULT_COPTS, + deps = ["//absl/meta:type_traits"], +) + +cc_test( + name = "hash_policy_traits_test", + srcs = ["internal/hash_policy_traits_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_traits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hashtable_debug", + hdrs = ["internal/hashtable_debug.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":hashtable_debug_hooks", + ], +) + +cc_library( + name = "hashtable_debug_hooks", + hdrs = ["internal/hashtable_debug_hooks.h"], + copts = ABSL_DEFAULT_COPTS, +) + +cc_library( + name = "node_hash_policy", + hdrs = ["internal/node_hash_policy.h"], + copts = ABSL_DEFAULT_COPTS, +) + +cc_test( + name = "node_hash_policy_test", + srcs = ["internal/node_hash_policy_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_traits", + ":node_hash_policy", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "raw_hash_map", + hdrs = ["internal/raw_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":raw_hash_set", + ], +) + +cc_library( + name = "raw_hash_set", + srcs = ["internal/raw_hash_set.cc"], + hdrs = ["internal/raw_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":compressed_tuple", + ":container_memory", + ":hash_policy_traits", + ":hashtable_debug_hooks", + ":layout", + "//absl/base:bits", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:endian", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/types:optional", + "//absl/utility", + ], +) + +cc_test( + name = "raw_hash_set_test", + srcs = ["internal/raw_hash_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkstatic = 1, + tags = NOTEST_TAGS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":hash_policy_testing", + ":hashtable_debug", + ":raw_hash_set", + "//absl/base", + "//absl/base:core_headers", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_hash_set_allocator_test", + size = "small", + srcs = ["internal/raw_hash_set_allocator_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":raw_hash_set", + ":tracked", + "//absl/base:core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "layout", + hdrs = ["internal/layout.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base:core_headers", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:span", + "//absl/utility", + ], +) + +cc_test( + name = "layout_test", + size = "small", + srcs = ["internal/layout_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS, + visibility = ["//visibility:private"], + deps = [ + ":layout", + "//absl/base", + "//absl/base:core_headers", + "//absl/types:span", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "tracked", + testonly = 1, + hdrs = ["internal/tracked.h"], + copts = ABSL_TEST_COPTS, +) + +cc_library( + name = "unordered_map_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_map_constructor_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = 
"unordered_map_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_map_lookup_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_map_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_set_constructor_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_set_lookup_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_set_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "unordered_set_test", + srcs = ["internal/unordered_set_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unordered_map_test", + srcs = ["internal/unordered_map_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index f56ce92d..8605facc 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -14,113 +14,674 @@ # limitations under the License. # +# This is deprecated and will be removed in the future. It also doesn't do +# anything anyways. Prefer to use the library associated with the API you are +# using. 
+absl_cc_library( + NAME + container + SRCS + "internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) -list(APPEND CONTAINER_PUBLIC_HEADERS - "fixed_array.h" - "inlined_vector.h" +absl_cc_library( + NAME + compressed_tuple + HDRS + "internal/compressed_tuple.h" + DEPS + absl::utility + PUBLIC ) +absl_cc_test( + NAME + compressed_tuple_test + SRCS + "internal/compressed_tuple_test.cc" + DEPS + absl::compressed_tuple + gmock_main +) -list(APPEND CONTAINER_INTERNAL_HEADERS - "internal/test_instance_tracker.h" +absl_cc_library( + NAME + fixed_array + HDRS + "fixed_array.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::algorithm + absl::core_headers + absl::dynamic_annotations + absl::throw_delegate + absl::memory + PUBLIC ) +absl_cc_test( + NAME + fixed_array_test + SRCS + "fixed_array_test.cc" + COPTS + ${ABSL_EXCEPTIONS_FLAG} + LINKOPTS + ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} + DEPS + absl::fixed_array + absl::exception_testing + absl::hash_testing + absl::memory + gmock_main +) -absl_header_library( - TARGET - absl_container - EXPORT_NAME - container +absl_cc_test( + NAME + fixed_array_test_noexceptions + SRCS + "fixed_array_test.cc" + DEPS + absl::fixed_array + absl::exception_testing + absl::hash_testing + absl::memory + gmock_main ) +absl_cc_test( + NAME + fixed_array_exception_safety_test + SRCS + "fixed_array_exception_safety_test.cc" + COPTS + ${ABSL_EXCEPTIONS_FLAG} + LINKOPTS + ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} + DEPS + absl::fixed_array + absl::exception_safety_testing + gmock_main +) -# -## TESTS -# +absl_cc_library( + NAME + inlined_vector + HDRS + "inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::throw_delegate + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + inlined_vector_test + SRCS + "inlined_vector_test.cc" + COPTS + ${ABSL_EXCEPTIONS_FLAG} + LINKOPTS + ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} + DEPS + absl::inlined_vector + absl::test_instance_tracker + absl::base + absl::core_headers + absl::exception_testing + absl::hash_testing + absl::memory + absl::strings + gmock_main +) + +absl_cc_test( + NAME + inlined_vector_test_noexceptions + SRCS + "inlined_vector_test.cc" + DEPS + absl::inlined_vector + absl::test_instance_tracker + absl::base + absl::core_headers + absl::exception_testing + absl::hash_testing + absl::memory + absl::strings + gmock_main +) -list(APPEND TEST_INSTANCE_TRACKER_LIB_SRC - "internal/test_instance_tracker.cc" - ${CONTAINER_PUBLIC_HEADERS} - ${CONTAINER_INTERNAL_HEADERS} +absl_cc_library( + NAME + test_instance_tracker + HDRS + "internal/test_instance_tracker.h" + SRCS + "internal/test_instance_tracker.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + TESTONLY ) +absl_cc_test( + NAME + test_instance_tracker_test + SRCS + "internal/test_instance_tracker_test.cc" + DEPS + absl::test_instance_tracker + gmock_main +) -absl_library( - TARGET - test_instance_tracker_lib - SOURCES - ${TEST_INSTANCE_TRACKER_LIB_SRC} - PUBLIC_LIBRARIES - absl::container - DISABLE_INSTALL +absl_cc_library( + NAME + flat_hash_map + HDRS + "flat_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC ) +absl_cc_test( + NAME + flat_hash_map_test + SRCS + "flat_hash_map_test.cc" + COPTS + "-DUNORDERED_MAP_CXX17" + DEPS + absl::flat_hash_map + absl::hash_generator_testing + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_modifiers_test + 
absl::any + gmock_main +) +absl_cc_library( + NAME + flat_hash_set + HDRS + "flat_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_set + absl::algorithm_container + absl::core_headers + absl::memory + PUBLIC +) -# test fixed_array_test -set(FIXED_ARRAY_TEST_SRC "fixed_array_test.cc") -set(FIXED_ARRAY_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib) +absl_cc_test( + NAME + flat_hash_set_test + SRCS + "flat_hash_set_test.cc" + COPTS + "-DUNORDERED_SET_CXX17" + DEPS + absl::flat_hash_set + absl::hash_generator_testing + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_modifiers_test + absl::memory + absl::strings + gmock_main +) -absl_test( - TARGET - fixed_array_test - SOURCES - ${FIXED_ARRAY_TEST_SRC} - PUBLIC_LIBRARIES - ${FIXED_ARRAY_TEST_PUBLIC_LIBRARIES} - PRIVATE_COMPILE_FLAGS - ${ABSL_EXCEPTIONS_FLAG} +absl_cc_library( + NAME + node_hash_map + HDRS + "node_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::node_hash_policy + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC ) +absl_cc_test( + NAME + node_hash_map_test + SRCS + "node_hash_map_test.cc" + COPTS + "-DUNORDERED_MAP_CXX17" + DEPS + absl::hash_generator_testing + absl::node_hash_map + absl::tracked + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_modifiers_test + gmock_main +) +absl_cc_library( + NAME + node_hash_set + HDRS + "node_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hash_function_defaults + absl::node_hash_policy + absl::raw_hash_set + absl::algorithm_container + absl::memory + PUBLIC +) -absl_test( - TARGET - fixed_array_test_noexceptions - SOURCES - ${FIXED_ARRAY_TEST_SRC} - PUBLIC_LIBRARIES - ${FIXED_ARRAY_TEST_PUBLIC_LIBRARIES} +absl_cc_test( + NAME + node_hash_set_test + SRCS + "node_hash_set_test.cc" + COPTS + "-DUNORDERED_SET_CXX17" + DEPS + absl::hash_generator_testing + absl::node_hash_set + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_modifiers_test + gmock_main ) +absl_cc_library( + NAME + container_memory + HDRS + "internal/container_memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::memory + absl::utility + PUBLIC +) -# test inlined_vector_test -set(INLINED_VECTOR_TEST_SRC "inlined_vector_test.cc") -set(INLINED_VECTOR_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib) +absl_cc_test( + NAME + container_memory_test + SRCS + "internal/container_memory_test.cc" + DEPS + absl::container_memory + absl::strings + gmock_main +) -absl_test( - TARGET - inlined_vector_test - SOURCES - ${INLINED_VECTOR_TEST_SRC} - PUBLIC_LIBRARIES - ${INLINED_VECTOR_TEST_PUBLIC_LIBRARIES} +absl_cc_library( + NAME + hash_function_defaults + HDRS + "internal/hash_function_defaults.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::hash + absl::strings + PUBLIC ) -absl_test( - TARGET - inlined_vector_test_noexceptions - SOURCES - ${INLINED_VECTOR_TEST_SRC} - PUBLIC_LIBRARIES - ${INLINED_VECTOR_TEST_PUBLIC_LIBRARIES} - PRIVATE_COMPILE_FLAGS - ${ABSL_NOEXCEPTION_CXXFLAGS} +absl_cc_test( + NAME + hash_function_defaults_test + SRCS + "internal/hash_function_defaults_test.cc" + DEPS + absl::hash_function_defaults + absl::hash + absl::strings + gmock_main ) +absl_cc_library( + NAME + hash_generator_testing + HDRS + "internal/hash_generator_testing.h" + 
SRCS + "internal/hash_generator_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_testing + absl::meta + absl::strings + TESTONLY +) -# test test_instance_tracker_test -set(TEST_INSTANCE_TRACKER_TEST_SRC "internal/test_instance_tracker_test.cc") -set(TEST_INSTANCE_TRACKER_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib) +absl_cc_library( + NAME + hash_policy_testing + HDRS + "internal/hash_policy_testing.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash + absl::strings + TESTONLY +) +absl_cc_test( + NAME + hash_policy_testing_test + SRCS + "internal/hash_policy_testing_test.cc" + DEPS + absl::hash_policy_testing + gmock_main +) -absl_test( - TARGET - test_instance_tracker_test - SOURCES - ${TEST_INSTANCE_TRACKER_TEST_SRC} - PUBLIC_LIBRARIES - ${TEST_INSTANCE_TRACKER_TEST_PUBLIC_LIBRARIES} +absl_cc_library( + NAME + hash_policy_traits + HDRS + "internal/hash_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC ) +absl_cc_test( + NAME + hash_policy_traits_test + SRCS + "internal/hash_policy_traits_test.cc" + DEPS + absl::hash_policy_traits + gmock_main +) + +absl_cc_library( + NAME + hashtable_debug + HDRS + "internal/hashtable_debug.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hashtable_debug_hooks +) + +absl_cc_library( + NAME + hashtable_debug_hooks + HDRS + "internal/hashtable_debug_hooks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + node_hash_policy + HDRS + "internal/node_hash_policy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_test( + NAME + node_hash_policy_test + SRCS + "internal/node_hash_policy_test.cc" + DEPS + absl::hash_policy_traits + absl::node_hash_policy + gmock_main +) + +absl_cc_library( + NAME + raw_hash_map + HDRS + "internal/raw_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::raw_hash_set + PUBLIC +) + +absl_cc_library( + NAME + raw_hash_set + HDRS + "internal/raw_hash_set.h" + SRCS + "internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::container_memory + absl::hash_policy_traits + absl::hashtable_debug_hooks + absl::layout + absl::bits + absl::config + absl::core_headers + absl::endian + absl::memory + absl::meta + absl::optional + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + raw_hash_set_test + SRCS + "internal/raw_hash_set_test.cc" + DEPS + absl::container_memory + absl::hash_function_defaults + absl::hash_policy_testing + absl::hashtable_debug + absl::raw_hash_set + absl::base + absl::core_headers + absl::strings + gmock_main +) + +absl_cc_test( + NAME + raw_hash_set_allocator_test + SRCS + "internal/raw_hash_set_allocator_test.cc" + DEPS + absl::raw_hash_set + absl::tracked + absl::core_headers + gmock_main +) + +absl_cc_library( + NAME + layout + HDRS + "internal/layout.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::meta + absl::strings + absl::span + absl::utility + PUBLIC +) +absl_cc_test( + NAME + layout_test + SRCS + "internal/layout_test.cc" + DEPS + absl::layout + absl::base + absl::core_headers + absl::span + gmock_main +) + +absl_cc_library( + NAME + tracked + HDRS + "internal/tracked.h" + COPTS + ${ABSL_TEST_COPTS} + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_constructor_test + HDRS + "internal/unordered_map_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_lookup_test + HDRS + 
"internal/unordered_map_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_modifiers_test + HDRS + "internal/unordered_map_modifiers_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_constructor_test + HDRS + "internal/unordered_set_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_lookup_test + HDRS + "internal/unordered_set_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_modifiers_test + HDRS + "internal/unordered_set_modifiers_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_test( + NAME + unordered_set_test + SRCS + "internal/unordered_set_test.cc" + DEPS + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_modifiers_test + gmock_main +) + +absl_cc_test( + NAME + unordered_map_test + SRCS + "internal/unordered_map_test.cc" + DEPS + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_modifiers_test + gmock_main +) diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h index daa4eb22..7f6a3afd 100644 --- a/absl/container/fixed_array.h +++ b/absl/container/fixed_array.h @@ -1,4 +1,4 @@ -// Copyright 2017 The Abseil Authors. +// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,10 +47,11 @@ #include "absl/base/macros.h" #include "absl/base/optimization.h" #include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" namespace absl { -inline namespace lts_2018_06_20 { +inline namespace lts_2018_12_18 { constexpr static auto kFixedArrayUseDefault = static_cast(-1); @@ -58,13 +59,13 @@ constexpr static auto kFixedArrayUseDefault = static_cast(-1); // FixedArray // ----------------------------------------------------------------------------- // -// A `FixedArray` provides a run-time fixed-size array, allocating small arrays -// inline for efficiency and correctness. +// A `FixedArray` provides a run-time fixed-size array, allocating a small array +// inline for efficiency. // // Most users should not specify an `inline_elements` argument and let -// `FixedArray<>` automatically determine the number of elements +// `FixedArray` automatically determine the number of elements // to store inline based on `sizeof(T)`. If `inline_elements` is specified, the -// `FixedArray<>` implementation will inline arrays of +// `FixedArray` implementation will use inline storage for arrays with a // length <= `inline_elements`. // // Note that a `FixedArray` constructed with a `size_type` argument will @@ -77,65 +78,100 @@ constexpr static auto kFixedArrayUseDefault = static_cast(-1); // heap allocation, it will do so with global `::operator new[]()` and // `::operator delete[]()`, even if T provides class-scope overrides for these // operators. 
-template +template > class FixedArray { + static_assert(!std::is_array::value || std::extent::value > 0, + "Arrays with unknown bounds cannot be used with FixedArray."); + static constexpr size_t kInlineBytesDefault = 256; + using AllocatorTraits = std::allocator_traits; // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, // but this seems to be mostly pedantic. - template - using EnableIfForwardIterator = typename std::enable_if< - std::is_convertible< - typename std::iterator_traits::iterator_category, - std::forward_iterator_tag>::value, - int>::type; + template + using EnableIfForwardIterator = absl::enable_if_t::iterator_category, + std::forward_iterator_tag>::value>; + static constexpr bool NoexceptCopyable() { + return std::is_nothrow_copy_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool NoexceptMovable() { + return std::is_nothrow_move_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool DefaultConstructorIsNonTrivial() { + return !absl::is_trivially_default_constructible::value; + } public: - // For playing nicely with stl: - using value_type = T; - using iterator = T*; - using const_iterator = const T*; + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename allocator_type::value_type; + using pointer = typename allocator_type::pointer; + using const_pointer = typename allocator_type::const_pointer; + using reference = typename allocator_type::reference; + using const_reference = typename allocator_type::const_reference; + using size_type = typename allocator_type::size_type; + using difference_type = typename allocator_type::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; - using reference = T&; - using const_reference = const T&; - using pointer = T*; - using const_pointer = const T*; - using difference_type = ptrdiff_t; - using size_type = size_t; static constexpr size_type inline_elements = - inlined == kFixedArrayUseDefault - ? kInlineBytesDefault / sizeof(value_type) - : inlined; - - FixedArray(const FixedArray& other) : rep_(other.begin(), other.end()) {} - FixedArray(FixedArray&& other) noexcept( - // clang-format off - absl::allocator_is_nothrow>::value && - // clang-format on - std::is_nothrow_move_constructible::value) - : rep_(std::make_move_iterator(other.begin()), - std::make_move_iterator(other.end())) {} + (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) + : static_cast(N)); + + FixedArray( + const FixedArray& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable()) + : FixedArray(other.begin(), other.end(), a) {} + + FixedArray( + FixedArray&& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptMovable()) + : FixedArray(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end()), a) {} // Creates an array object that can store `n` elements. // Note that trivially constructible elements will be uninitialized. - explicit FixedArray(size_type n) : rep_(n) {} + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) + : storage_(n, a) { + if (DefaultConstructorIsNonTrivial()) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end()); + } + } // Creates an array initialized with `n` copies of `val`. 
- FixedArray(size_type n, const value_type& val) : rep_(n, val) {} + FixedArray(size_type n, const value_type& val, + const allocator_type& a = allocator_type()) + : storage_(n, a) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, + const allocator_type& a = allocator_type()) + : FixedArray(init_list.begin(), init_list.end(), a) {} // Creates an array initialized with the elements from the input // range. The array's size will always be `std::distance(first, last)`. - // REQUIRES: Iter must be a forward_iterator or better. - template = 0> - FixedArray(Iter first, Iter last) : rep_(first, last) {} - - // Creates the array from an initializer_list. - FixedArray(std::initializer_list init_list) - : FixedArray(init_list.begin(), init_list.end()) {} + // REQUIRES: Iterator must be a forward_iterator or better. + template * = nullptr> + FixedArray(Iterator first, Iterator last, + const allocator_type& a = allocator_type()) + : storage_(std::distance(first, last), a) { + memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); + } - ~FixedArray() {} + ~FixedArray() noexcept { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } // Assignments are deleted because they break the invariant that the size of a // `FixedArray` never changes. @@ -145,7 +181,7 @@ class FixedArray { // FixedArray::size() // // Returns the length of the fixed array. - size_type size() const { return rep_.size(); } + size_type size() const { return storage_.size(); } // FixedArray::max_size() // @@ -153,7 +189,7 @@ class FixedArray { // `FixedArray`. This is equivalent to the most possible addressable bytes // over the number of bytes taken by T. constexpr size_type max_size() const { - return std::numeric_limits::max() / sizeof(value_type); + return (std::numeric_limits::max)() / sizeof(value_type); } // FixedArray::empty() @@ -170,12 +206,12 @@ class FixedArray { // // Returns a const T* pointer to elements of the `FixedArray`. This pointer // can be used to access (but not modify) the contained elements. - const_pointer data() const { return AsValue(rep_.begin()); } + const_pointer data() const { return AsValueType(storage_.begin()); } // Overload of FixedArray::data() to return a T* pointer to elements of the // fixed array. This pointer can be used to access and modify the contained // elements. - pointer data() { return AsValue(rep_.begin()); } + pointer data() { return AsValueType(storage_.begin()); } // FixedArray::operator[] // @@ -295,7 +331,7 @@ class FixedArray { // FixedArray::fill() // // Assigns the given `value` to all elements in the fixed array. - void fill(const T& value) { std::fill(begin(), end(), value); } + void fill(const value_type& val) { std::fill(begin(), end(), val); } // Relational operators. Equality operators are elementwise using // `operator==`, while order operators order FixedArrays lexicographically. @@ -324,18 +360,25 @@ class FixedArray { return !(lhs < rhs); } + template + friend H AbslHashValue(H h, const FixedArray& v) { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), + v.size()); + } + private: - // HolderTraits + // StorageElement // - // Wrapper to hold elements of type T for the case where T is an array type. - // If 'T' is an array type, HolderTraits::type is a struct with a 'T v;'. 
- // Otherwise, HolderTraits::type is simply 'T'. + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. // - // Maintainer's Note: The simpler solution would be to simply wrap T in a - // struct whether it's an array or not: 'struct Holder { T v; };', but - // that causes some paranoid diagnostics to misfire about uses of data(), - // believing that 'data()' (aka '&rep_.begin().v') is a pointer to a single - // element, rather than the packed array that it really is. + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. // e.g.: // // FixedArray buf(1); @@ -344,157 +387,134 @@ class FixedArray { // error: call to int __builtin___sprintf_chk(etc...) // will always overflow destination buffer [-Werror] // - class HolderTraits { - template - struct SelectImpl { - using type = U; - static pointer AsValue(type* p) { return p; } - }; - - // Partial specialization for elements of array type. - template - struct SelectImpl { - struct Holder { U v[N]; }; - using type = Holder; - static pointer AsValue(type* p) { return &p->v; } - }; - using Impl = SelectImpl; - - public: - using type = typename Impl::type; - - static pointer AsValue(type *p) { return Impl::AsValue(p); } - - // TODO(billydonahue): fix the type aliasing violation - // this assertion hints at. - static_assert(sizeof(type) == sizeof(value_type), - "Holder must be same size as value_type"); + template , + size_t InnerN = std::extent::value> + struct StorageElementWrapper { + InnerT array[InnerN]; }; - using Holder = typename HolderTraits::type; - static pointer AsValue(Holder *p) { return HolderTraits::AsValue(p); } + using StorageElement = + absl::conditional_t::value, + StorageElementWrapper, value_type>; + using StorageElementBuffer = + absl::aligned_storage_t; - // InlineSpace - // - // Allocate some space, not an array of elements of type T, so that we can - // skip calling the T constructors and destructors for space we never use. - // How many elements should we store inline? - // a. If not specified, use a default of kInlineBytesDefault bytes (This is - // currently 256 bytes, which seems small enough to not cause stack overflow - // or unnecessary stack pollution, while still allowing stack allocation for - // reasonably long character arrays). - // b. 
Never use 0 length arrays (not ISO C++) - // - template - class InlineSpace { - public: - Holder* data() { return reinterpret_cast(space_.data()); } - void AnnotateConstruct(size_t n) const { Annotate(n, true); } - void AnnotateDestruct(size_t n) const { Annotate(n, false); } + static pointer AsValueType(pointer ptr) { return ptr; } + static pointer AsValueType(StorageElementWrapper* ptr) { + return std::addressof(ptr->array); + } - private: -#ifndef ADDRESS_SANITIZER - void Annotate(size_t, bool) const { } -#else - void Annotate(size_t n, bool creating) const { - if (!n) return; - const void* bot = &left_redzone_; - const void* beg = space_.data(); - const void* end = space_.data() + n; - const void* top = &right_redzone_ + 1; - // args: (beg, end, old_mid, new_mid) - if (creating) { - ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, top, end); - ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, beg, bot); - } else { - ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, end, top); - ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, bot, beg); - } + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + struct NonEmptyInlinedStorage { + StorageElement* data() { + return reinterpret_cast(inlined_storage_.data()); } + +#ifdef ADDRESS_SANITIZER + void* RedzoneBegin() { return &redzone_begin_; } + void* RedzoneEnd() { return &redzone_end_ + 1; } #endif // ADDRESS_SANITIZER - using Buffer = - typename std::aligned_storage::type; + void AnnotateConstruct(size_type); + void AnnotateDestruct(size_type); - ADDRESS_SANITIZER_REDZONE(left_redzone_); - std::array space_; - ADDRESS_SANITIZER_REDZONE(right_redzone_); + ADDRESS_SANITIZER_REDZONE(redzone_begin_); + std::array inlined_storage_; + ADDRESS_SANITIZER_REDZONE(redzone_end_); }; - // specialization when N = 0. - template - class InlineSpace<0, U> { - public: - Holder* data() { return nullptr; } - void AnnotateConstruct(size_t) const {} - void AnnotateDestruct(size_t) const {} + struct EmptyInlinedStorage { + StorageElement* data() { return nullptr; } + void AnnotateConstruct(size_type) {} + void AnnotateDestruct(size_type) {} }; - // Rep + using InlinedStorage = + absl::conditional_t; + + // Storage // - // A const Rep object holds FixedArray's size and data pointer. + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. // - class Rep : public InlineSpace { + class Storage : public InlinedStorage { public: - Rep(size_type n, const value_type& val) : n_(n), p_(MakeHolder(n)) { - std::uninitialized_fill_n(p_, n, val); - } + Storage(size_type n, const allocator_type& a) + : size_alloc_(n, a), data_(InitializeData()) {} - explicit Rep(size_type n) : n_(n), p_(MakeHolder(n)) { - // Loop optimizes to nothing for trivially constructible T. - for (Holder* p = p_; p != p_ + n; ++p) - // Note: no parens: default init only. - // Also note '::' to avoid Holder class placement new operator. 
- ::new (static_cast(p)) Holder; + ~Storage() noexcept { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateDestruct(size()); + } else { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } } - template - Rep(Iter first, Iter last) - : n_(std::distance(first, last)), p_(MakeHolder(n_)) { - std::uninitialized_copy(first, last, AsValue(p_)); + size_type size() const { return size_alloc_.template get<0>(); } + StorageElement* begin() const { return data_; } + StorageElement* end() const { return begin() + size(); } + allocator_type& alloc() { + return size_alloc_.template get<1>(); } - ~Rep() { - // Destruction must be in reverse order. - // Loop optimizes to nothing for trivially destructible T. - for (Holder* p = end(); p != begin();) (--p)->~Holder(); - if (IsAllocated(size())) { - std::allocator().deallocate(p_, n_); - } else { - this->AnnotateDestruct(size()); - } + private: + static bool UsingInlinedStorage(size_type n) { + return n <= inline_elements; } - Holder* begin() const { return p_; } - Holder* end() const { return p_ + n_; } - size_type size() const { return n_; } - private: - Holder* MakeHolder(size_type n) { - if (IsAllocated(n)) { - return std::allocator().allocate(n); + StorageElement* InitializeData() { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); } else { - this->AnnotateConstruct(n); - return this->data(); + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size())); } } - bool IsAllocated(size_type n) const { return n > inline_elements; } - - const size_type n_; - Holder* const p_; + // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s + container_internal::CompressedTuple size_alloc_; + StorageElement* data_; }; - - // Data members - Rep rep_; + Storage storage_; }; -template -constexpr size_t FixedArray::inline_elements; - -template -constexpr size_t FixedArray::kInlineBytesDefault; - -} // inline namespace lts_2018_06_20 +template +constexpr size_t FixedArray::kInlineBytesDefault; + +template +constexpr typename FixedArray::size_type + FixedArray::inline_elements; + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( + typename FixedArray::size_type n) { +#ifdef ADDRESS_SANITIZER + if (!n) return; + ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); + ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); +#endif // ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode +} + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( + typename FixedArray::size_type n) { +#ifdef ADDRESS_SANITIZER + if (!n) return; + ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); + ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); +#endif // ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode +} +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_CONTAINER_FIXED_ARRAY_H_ diff --git a/absl/container/fixed_array_exception_safety_test.cc b/absl/container/fixed_array_exception_safety_test.cc new file mode 100644 index 00000000..4d0430b3 --- /dev/null +++ b/absl/container/fixed_array_exception_safety_test.cc @@ -0,0 +1,119 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/container/fixed_array.h" + +#include "gtest/gtest.h" +#include "absl/base/internal/exception_safety_testing.h" + +namespace absl { +inline namespace lts_2018_12_18 { + +namespace { + +constexpr size_t kInlined = 25; +constexpr size_t kSmallSize = kInlined / 2; +constexpr size_t kLargeSize = kInlined * 2; + +constexpr int kInitialValue = 5; +constexpr int kUpdatedValue = 10; + +using ::testing::TestThrowingCtor; + +using Thrower = testing::ThrowingValue; +using FixedArr = absl::FixedArray; + +using MoveThrower = testing::ThrowingValue; +using MoveFixedArr = absl::FixedArray; + +TEST(FixedArrayExceptionSafety, CopyConstructor) { + auto small = FixedArr(kSmallSize); + TestThrowingCtor(small); + + auto large = FixedArr(kLargeSize); + TestThrowingCtor(large); +} + +TEST(FixedArrayExceptionSafety, MoveConstructor) { + TestThrowingCtor(FixedArr(kSmallSize)); + TestThrowingCtor(FixedArr(kLargeSize)); + + // TypeSpec::kNoThrowMove + TestThrowingCtor(MoveFixedArr(kSmallSize)); + TestThrowingCtor(MoveFixedArr(kLargeSize)); +} + +TEST(FixedArrayExceptionSafety, SizeConstructor) { + TestThrowingCtor(kSmallSize); + TestThrowingCtor(kLargeSize); +} + +TEST(FixedArrayExceptionSafety, SizeValueConstructor) { + TestThrowingCtor(kSmallSize, Thrower()); + TestThrowingCtor(kLargeSize, Thrower()); +} + +TEST(FixedArrayExceptionSafety, IteratorConstructor) { + auto small = FixedArr(kSmallSize); + TestThrowingCtor(small.begin(), small.end()); + + auto large = FixedArr(kLargeSize); + TestThrowingCtor(large.begin(), large.end()); +} + +TEST(FixedArrayExceptionSafety, InitListConstructor) { + constexpr int small_inlined = 3; + using SmallFixedArr = absl::FixedArray; + + TestThrowingCtor(std::initializer_list{}); + // Test inlined allocation + TestThrowingCtor( + std::initializer_list{Thrower{}, Thrower{}}); + // Test out of line allocation + TestThrowingCtor(std::initializer_list{ + Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); +} + +testing::AssertionResult ReadMemory(FixedArr* fixed_arr) { + // Marked volatile to prevent optimization. Used for running asan tests. 
+ volatile int sum = 0; + for (const auto& thrower : *fixed_arr) { + sum += thrower.Get(); + } + return testing::AssertionSuccess() << "Values sum to [" << sum << "]"; +} + +TEST(FixedArrayExceptionSafety, Fill) { + auto test_fill = testing::MakeExceptionSafetyTester() + .WithContracts(ReadMemory) + .WithOperation([&](FixedArr* fixed_arr_ptr) { + auto thrower = + Thrower(kUpdatedValue, testing::nothrow_ctor); + fixed_arr_ptr->fill(thrower); + }); + + EXPECT_TRUE( + test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue))) + .Test()); + EXPECT_TRUE( + test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue))) + .Test()); +} + +} // namespace + +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc index 2142132d..205ff41f 100644 --- a/absl/container/fixed_array_test.cc +++ b/absl/container/fixed_array_test.cc @@ -15,9 +15,11 @@ #include "absl/container/fixed_array.h" #include +#include #include #include #include +#include #include #include #include @@ -25,6 +27,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/exception_testing.h" +#include "absl/hash/hash_testing.h" #include "absl/memory/memory.h" using ::testing::ElementsAreArray; @@ -607,6 +610,216 @@ TEST(FixedArrayTest, Fill) { empty.fill(fill_val); } +// TODO(johnsoncj): Investigate InlinedStorage default initialization in GCC 4.x +#ifndef __GNUC__ +TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) { + using T = char; + constexpr auto capacity = 10; + using FixedArrType = absl::FixedArray; + using FixedArrBuffType = + absl::aligned_storage_t; + constexpr auto scrubbed_bits = 0x95; + constexpr auto length = capacity / 2; + + FixedArrBuffType buff; + std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrBuffType)); + + FixedArrType* arr = + ::new (static_cast(std::addressof(buff))) FixedArrType(length); + EXPECT_THAT(*arr, testing::Each(scrubbed_bits)); + arr->~FixedArrType(); +} +#endif // __GNUC__ + +// This is a stateful allocator, but the state lives outside of the +// allocator (in whatever test is using the allocator). This is odd +// but helps in tests where the allocator is propagated into nested +// containers - that chain of allocators uses the same state and is +// thus easier to query for aggregate allocation information. +template +class CountingAllocator : public std::allocator { + public: + using Alloc = std::allocator; + using pointer = typename Alloc::pointer; + using size_type = typename Alloc::size_type; + + CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {} + explicit CountingAllocator(int64_t* b) + : bytes_used_(b), instance_count_(nullptr) {} + CountingAllocator(int64_t* b, int64_t* a) + : bytes_used_(b), instance_count_(a) {} + + template + explicit CountingAllocator(const CountingAllocator& x) + : Alloc(x), + bytes_used_(x.bytes_used_), + instance_count_(x.instance_count_) {} + + pointer allocate(size_type n, const void* const hint = nullptr) { + assert(bytes_used_ != nullptr); + *bytes_used_ += n * sizeof(T); + return Alloc::allocate(n, hint); + } + + void deallocate(pointer p, size_type n) { + Alloc::deallocate(p, n); + assert(bytes_used_ != nullptr); + *bytes_used_ -= n * sizeof(T); + } + + template + void construct(pointer p, Args&&... 
args) { + Alloc::construct(p, absl::forward(args)...); + if (instance_count_) { + *instance_count_ += 1; + } + } + + void destroy(pointer p) { + Alloc::destroy(p); + if (instance_count_) { + *instance_count_ -= 1; + } + } + + template + class rebind { + public: + using other = CountingAllocator; + }; + + int64_t* bytes_used_; + int64_t* instance_count_; +}; + +TEST(AllocatorSupportTest, CountInlineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated = 0; + int64_t active_instances = 0; + + { + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + + Alloc alloc(&allocated, &active_instances); + + AllocFxdArr arr(ia, ia + inlined_size, alloc); + static_cast(arr); + } + + EXPECT_EQ(allocated, 0); + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountOutoflineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated = 0; + int64_t active_instances = 0; + + { + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + Alloc alloc(&allocated, &active_instances); + + AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc); + + EXPECT_EQ(allocated, arr.size() * sizeof(int)); + static_cast(arr); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountCopyInlineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated1 = 0; + int64_t allocated2 = 0; + int64_t active_instances = 0; + Alloc alloc(&allocated1, &active_instances); + Alloc alloc2(&allocated2, &active_instances); + + { + int initial_value = 1; + + AllocFxdArr arr1(inlined_size / 2, initial_value, alloc); + + EXPECT_EQ(allocated1, 0); + + AllocFxdArr arr2(arr1, alloc2); + + EXPECT_EQ(allocated2, 0); + static_cast(arr1); + static_cast(arr2); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated1 = 0; + int64_t allocated2 = 0; + int64_t active_instances = 0; + Alloc alloc(&allocated1, &active_instances); + Alloc alloc2(&allocated2, &active_instances); + + { + int initial_value = 1; + + AllocFxdArr arr1(inlined_size * 2, initial_value, alloc); + + EXPECT_EQ(allocated1, arr1.size() * sizeof(int)); + + AllocFxdArr arr2(arr1, alloc2); + + EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int)); + static_cast(arr1); + static_cast(arr2); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, SizeValAllocConstructor) { + using testing::AllOf; + using testing::Each; + using testing::SizeIs; + + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + { + auto len = inlined_size / 2; + auto val = 0; + int64_t allocated = 0; + AllocFxdArr arr(len, val, Alloc(&allocated)); + + EXPECT_EQ(allocated, 0); + EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); + } + + { + auto len = inlined_size * 2; + auto val = 0; + int64_t allocated = 0; + AllocFxdArr arr(len, val, Alloc(&allocated)); + + EXPECT_EQ(allocated, len * sizeof(int)); + EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); + } +} + #ifdef ADDRESS_SANITIZER TEST(FixedArrayTest, AddressSanitizerAnnotations1) { absl::FixedArray a(10); diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h new file mode 100644 index 00000000..ed453348 --- /dev/null 
+++ b/absl/container/flat_hash_map.h @@ -0,0 +1,582 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `flat_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. + +#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_ +#define ABSL_CONTAINER_FLAT_HASH_MAP_H_ + +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +template +struct FlatHashMapPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::flat_hash_map +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container which +// has been optimized for both speed and memory footprint in most common use +// cases. Its interface is similar to that of `std::unordered_map` with +// the following notable differences: +// +// * Requires keys that are CopyConstructible +// * Requires values that are MoveConstructible +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the map is provided a compatible heterogeneous +// hashing function and equality operator. +// * Invalidates any references and pointers to elements within the table after +// `rehash()`. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash map. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `flat_hash_map` uses the `absl::Hash` hashing framework. +// All fundamental and Abseil types that support the `absl::Hash` framework have +// a compatible equality operator for comparing insertions into `flat_hash_map`. +// If your type is not yet supported by the `absl::Hash` framework, see +// absl/hash/hash.h for information on extending Abseil hashing to user-defined +// types. 
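As a sketch of that extension mechanism (the type, its fields, and the map below are hypothetical, not taken from the patch): a user-defined key needs only an `operator==` and a friend `AbslHashValue` overload to work with the default hasher.

#include <string>
#include <utility>

#include "absl/container/flat_hash_map.h"

struct CityKey {
  std::string name;
  int zip;

  friend bool operator==(const CityKey& a, const CityKey& b) {
    return a.name == b.name && a.zip == b.zip;
  }

  // Combines each field into the hash state; the fields hashed here should
  // match the fields compared by operator==.
  template <typename H>
  friend H AbslHashValue(H h, const CityKey& k) {
    return H::combine(std::move(h), k.name, k.zip);
  }
};

// With both defined, CityKey works as a key under the default hash/eq:
absl::flat_hash_map<CityKey, int> population;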
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or your values are large,
+// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `absl::node_hash_map`.
+//
+// Example:
+//
+//   // Create a flat hash map of three strings (that map to strings)
+//   absl::flat_hash_map<std::string, std::string> ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the flat hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template <class K, class V,
+          class Hash = absl::container_internal::hash_default_hash<K>,
+          class Eq = absl::container_internal::hash_default_eq<K>,
+          class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public absl::container_internal::raw_hash_map<
+                          absl::container_internal::FlatHashMapPolicy<K, V>,
+                          Hash, Eq, Allocator> {
+  using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A flat_hash_map supports the same overload set as `std::unordered_map`
+  // for construction and assignment:
+  //
+  // *  Default constructor
+  //
+  //    // No allocation for the table's elements is made.
+  //    absl::flat_hash_map<int, std::string> map1;
+  //
+  // *  Initializer List constructor
+  //
+  //    absl::flat_hash_map<int, std::string> map2 =
+  //        {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+  //
+  // *  Copy constructor
+  //
+  //    absl::flat_hash_map<int, std::string> map3(map2);
+  //
+  // *  Copy assignment operator
+  //
+  //    // Hash functor and Comparator are copied as well
+  //    absl::flat_hash_map<int, std::string> map4;
+  //    map4 = map3;
+  //
+  // *  Move constructor
+  //
+  //    // Move is guaranteed efficient
+  //    absl::flat_hash_map<int, std::string> map5(std::move(map4));
+  //
+  // *  Move assignment operator
+  //
+  //    // May be efficient if allocators are compatible
+  //    absl::flat_hash_map<int, std::string> map6;
+  //    map6 = std::move(map5);
+  //
+  // *  Range constructor
+  //
+  //    std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+  //    absl::flat_hash_map<int, std::string> map7(v.begin(), v.end());
+  flat_hash_map() {}
+  using Base::Base;
+
+  // flat_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_map`.
+  using Base::begin;
+
+  // flat_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_map`.
+  using Base::cbegin;
+
+  // flat_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_map`.
+  using Base::cend;
+
+  // flat_hash_map::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_map`.
+  using Base::end;
+
+  // flat_hash_map::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_map`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_map` and is
+  // not provided in the `std::unordered_map` API.
+  using Base::capacity;
+
+  // flat_hash_map::empty()
+  //
+  // Returns whether or not the `flat_hash_map` is empty.
+  using Base::empty;
+
+  // flat_hash_map::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `flat_hash_map` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_map<K, V>`.
+  using Base::max_size;
+
+  // flat_hash_map::size()
+  //
+  // Returns the number of elements currently within the `flat_hash_map`.
+  using Base::size;
+
+  // flat_hash_map::clear()
+  //
+  // Removes all elements from the `flat_hash_map`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer, call `erase(begin(), end())` instead.
+  using Base::clear;
+
+  // flat_hash_map::erase()
+  //
+  // Erases elements within the `flat_hash_map`. Erasing does not trigger a
+  // rehash. Overloads are listed below.
+  //
+  // void erase(const_iterator pos):
+  //
+  //   Erases the element at `position` of the `flat_hash_map`, returning
+  //   `void`.
+  //
+  //   NOTE: this return behavior is different than that of STL containers in
+  //   general and `std::unordered_map` in particular.
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  //   Erases the elements in the open interval [`first`, `last`), returning
+  //   an iterator pointing to `last`.
+  //
+  // size_type erase(const key_type& key):
+  //
+  //   Erases the element with the matching key, if it exists.
+  using Base::erase;
+
+  // flat_hash_map::insert()
+  //
+  // Inserts an element of the specified value into the `flat_hash_map`,
+  // returning an iterator pointing to the newly inserted element, provided
+  // that an element with the given key does not already exist. If rehashing
+  // occurs due to the insertion, all iterators are invalidated. Overloads are
+  // listed below.
+  //
+  // std::pair<iterator, bool> insert(const init_type& value):
+  //
+  //   Inserts a value into the `flat_hash_map`. Returns a pair consisting of
+  //   an iterator to the inserted element (or to the element that prevented
+  //   the insertion) and a bool denoting whether the insertion took place.
+  //
+  // std::pair<iterator, bool> insert(T&& value):
+  // std::pair<iterator, bool> insert(init_type&& value):
+  //
+  //   Inserts a moveable value into the `flat_hash_map`. Returns a pair
+  //   consisting of an iterator to the inserted element (or to the element
+  //   that prevented the insertion) and a bool denoting whether the insertion
+  //   took place.
+  //
+  // iterator insert(const_iterator hint, const init_type& value):
+  // iterator insert(const_iterator hint, T&& value):
+  // iterator insert(const_iterator hint, init_type&& value);
+  //
+  //   Inserts a value, using the position of `hint` as a non-binding
+  //   suggestion for where to begin the insertion search. Returns an iterator
+  //   to the inserted element, or to the existing element that prevented the
+  //   insertion.
+  //
+  // void insert(InputIterator first, InputIterator last):
+  //
+  //   Inserts a range of values [`first`, `last`).
+  //
+  //   NOTE: Although the STL does not specify which element may be inserted if
+  //   multiple keys compare equivalently, for `flat_hash_map` we guarantee the
+  //   first match is inserted.
+  //
+  // void insert(std::initializer_list<init_type> ilist):
+  //
+  //   Inserts the elements within the initializer list `ilist`.
+  //
+  //   NOTE: Although the STL does not specify which element may be inserted if
+  //   multiple keys compare equivalently within the initializer list, for
+  //   `flat_hash_map` we guarantee the first match is inserted.
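A small compilable sketch of the pair-returning overload described above (the map contents are illustrative, not part of the patch):

#include <string>

#include "absl/container/flat_hash_map.h"

void InsertSketch() {
  absl::flat_hash_map<std::string, std::string> ducks =
      {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};

  // First insertion of key "d" succeeds: result.second is true.
  auto result = ducks.insert({"d", "donald"});

  // A second insert with an equal key is a no-op: result.second is false and
  // result.first points at the element that prevented the insertion.
  result = ducks.insert({"d", "della"});
}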
+ using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. + // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const init_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. 
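A compilable sketch of the no-construction guarantee (names and values are illustrative; the behavior follows the contract stated above): with `try_emplace`, the arguments for the mapped value are left untouched when the key already exists.

#include <memory>
#include <string>
#include <utility>

#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"

void TryEmplaceSketch() {
  absl::flat_hash_map<std::string, std::unique_ptr<int>> m;
  m.try_emplace("answer", absl::make_unique<int>(42));

  // "answer" already exists, so no mapped value is constructed and `p` is
  // not moved from; emplace() might have constructed a value here only to
  // destroy it immediately.
  auto p = absl::make_unique<int>(0);
  m.try_emplace("answer", std::move(p));
  // `p` still owns its allocation at this point.
}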
+  using Base::try_emplace;
+
+  // flat_hash_map::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  //   Extracts the key,value pair of the element at the indicated position
+  //   and returns a node handle owning that extracted data.
+  //
+  // node_type extract(const key_type& x):
+  //
+  //   Extracts the key,value pair of the element with a key matching the
+  //   passed key value and returns a node handle owning that extracted data.
+  //   If the `flat_hash_map` does not contain an element with a matching key,
+  //   this function returns an empty node handle.
+  using Base::extract;
+
+  // flat_hash_map::merge()
+  //
+  // Extracts elements from a given `source` flat hash map into this
+  // `flat_hash_map`. If the destination `flat_hash_map` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_map::swap(flat_hash_map& other)
+  //
+  // Exchanges the contents of this `flat_hash_map` with those of the `other`
+  // flat hash map, avoiding invocation of any move, copy, or swap operations
+  // on individual elements.
+  //
+  // All iterators and references on the `flat_hash_map` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash map's hashing and key equivalence
+  // functions be Swappable; they are exchanged using unqualified calls to
+  // non-member `swap()`. If the map's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_map::rehash(count)
+  //
+  // Rehashes the `flat_hash_map`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor
+  // more than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_map`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_map::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_map` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_map::at()
+  //
+  // Returns a reference to the mapped value of the element with key equivalent
+  // to the passed key.
+  using Base::at;
+
+  // flat_hash_map::contains()
+  //
+  // Determines whether an element with a key comparing equal to the given
+  // `key` exists within the `flat_hash_map`, returning `true` if so or
+  // `false` otherwise.
+  using Base::contains;
+
+  // flat_hash_map::count(const Key& key) const
+  //
+  // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `flat_hash_map`. Note that this function will return
+  // either `1` or `0` since duplicate keys are not allowed within a
+  // `flat_hash_map`.
+
+  // flat_hash_map::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_map`.
+  using Base::equal_range;
+
+  // flat_hash_map::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_map`.
+  using Base::find;
+
+  // flat_hash_map::operator[]()
+  //
+  // Returns a reference to the value mapped to the passed key within the
+  // `flat_hash_map`, performing an `insert()` if the key does not already
+  // exist.
+  //
+  // If an insertion occurs and results in a rehashing of the container, all
+  // iterators are invalidated. Otherwise iterators are not affected and
+  // references are not invalidated. Overloads are listed below.
+  //
+  //   T& operator[](const Key& key):
+  //
+  //     Inserts an init_type object constructed in-place if the element with
+  //     the given key does not exist.
+  //
+  //   T& operator[](Key&& key):
+  //
+  //     Inserts an init_type object constructed in-place provided that an
+  //     element with the given key does not exist.
+  using Base::operator[];
+
+  // flat_hash_map::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_map`. Note that
+  // because a flat hash map contains all elements within its internal
+  // storage, this value simply equals the current capacity of the
+  // `flat_hash_map`.
+  using Base::bucket_count;
+
+  // flat_hash_map::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_map` (the average
+  // number of slots occupied with a value within the hash map).
+  using Base::load_factor;
+
+  // flat_hash_map::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_map`. Overloads are
+  // listed below.
+  //
+  //   float flat_hash_map::max_load_factor()
+  //
+  //     Returns the current maximum load factor of the `flat_hash_map`.
+  //
+  //   void flat_hash_map::max_load_factor(float ml)
+  //
+  //     Sets the maximum load factor of the `flat_hash_map` to the passed
+  //     value.
+  //
+  //     NOTE: This overload is provided only for API compatibility with the
+  //     STL; `flat_hash_map` will ignore any set load factor and manage its
+  //     rehashing internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_map::get_allocator()
+  //
+  // Returns the allocator function associated with this `flat_hash_map`.
+  using Base::get_allocator;
+
+  // flat_hash_map::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_map`.
+  using Base::hash_function;
+
+  // flat_hash_map::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+};
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+  using slot_type = container_internal::slot_type<K, V>;
+  using key_type = K;
+  using mapped_type = V;
+  using init_type = std::pair</*non-const*/ key_type, mapped_type>;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    slot_type::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    slot_type::destroy(alloc, slot);
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    slot_type::transfer(alloc, new_slot, old_slot);
+  }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposePair(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposePair(
+        std::forward<F>(f), std::forward<Args>(args)...);
+  }
+
+  static size_t space_used(const slot_type*) { return 0; }
+
+  static std::pair<const K, V>& element(slot_type* slot) {
+    return slot->value;
+  }
+
+  static V& value(std::pair<const K, V>* kv) { return kv->second; }
+  static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
+};
+
+}  // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class K, class V, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    absl::flat_hash_map<K, V, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FLAT_HASH_MAP_H_
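Before the accompanying tests, a small sketch of the sizing API documented above (numbers invented). Because a flat hash map keeps every element in its backing array, `bucket_count()` and `capacity()` report the same quantity:

    absl::flat_hash_map<int, int> m;
    m.reserve(100);  // enough slots to hold at least 100 elements without rehashing
    assert(m.bucket_count() == m.capacity());  // buckets are exactly the slots
    m.rehash(0);     // forces a rehash; iterators and references are invalidated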
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
new file mode 100644
index 00000000..02d2fa81
--- /dev/null
+++ b/absl/container/flat_hash_map_test.cc
@@ -0,0 +1,243 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_map.h"
+
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+#include "absl/types/any.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::_;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+template <class K, class V>
+using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
+                          Alloc<std::pair<const K, V>>>;
+
+static_assert(!std::is_standard_layout<hash_internal::NonStandardLayout>(),
+              "");
+
+using MapTypes =
+    ::testing::Types<Map<int, int>, Map<std::string, int>,
+                     Map<Enum, std::string>, Map<EnumClass, int>,
+                     Map<int, hash_internal::NonStandardLayout>,
+                     Map<hash_internal::NonStandardLayout, int>>;
+
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ModifiersTest, MapTypes);
+
+TEST(FlatHashMap, StandardLayout) {
+  struct Int {
+    explicit Int(size_t value) : value(value) {}
+    Int() : value(0) { ADD_FAILURE(); }
+    Int(const Int& other) : value(other.value) { ADD_FAILURE(); }
+    Int(Int&&) = default;
+    bool operator==(const Int& other) const { return value == other.value; }
+    size_t value;
+  };
+  static_assert(std::is_standard_layout<Int>(), "");
+
+  struct Hash {
+    size_t operator()(const Int& obj) const { return obj.value; }
+  };
+
+  // Verify that neither the key nor the value get default-constructed or
+  // copy-constructed.
+  {
+    flat_hash_map<Int, Int, Hash> m;
+    m.try_emplace(Int(1), Int(2));
+    m.try_emplace(Int(3), Int(4));
+    m.erase(Int(1));
+    m.rehash(2 * m.bucket_count());
+  }
+  {
+    flat_hash_map<Int, Int, Hash> m;
+    m.try_emplace(Int(1), Int(2));
+    m.try_emplace(Int(3), Int(4));
+    m.erase(Int(1));
+    m.clear();
+  }
+}
+
+// gcc becomes unhappy if this is inside the method, so pull it out here.
+struct balast {};
+
+TEST(FlatHashMap, IteratesMsan) {
+  // Because SwissTable randomizes on pointer addresses, we keep old tables
+  // around to ensure we don't reuse old memory.
+  std::vector<absl::flat_hash_map<int, balast>> garbage;
+  for (int i = 0; i < 100; ++i) {
+    absl::flat_hash_map<int, balast> t;
+    for (int j = 0; j < 100; ++j) {
+      t[j];
+      for (const auto& p : t) EXPECT_THAT(p, Pair(_, _));
+    }
+    garbage.push_back(std::move(t));
+  }
+}
+
+// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to
+// avoid creating expensive key elements when the item is already present in
+// the map.
+struct LazyInt {
+  explicit LazyInt(size_t value, int* tracker)
+      : value(value), tracker(tracker) {}
+
+  explicit operator size_t() const {
+    ++*tracker;
+    return value;
+  }
+
+  size_t value;
+  int* tracker;
+};
+
+struct Hash {
+  using is_transparent = void;
+  int* tracker;
+  size_t operator()(size_t obj) const {
+    ++*tracker;
+    return obj;
+  }
+  size_t operator()(const LazyInt& obj) const {
+    ++*tracker;
+    return obj.value;
+  }
+};
+
+struct Eq {
+  using is_transparent = void;
+  bool operator()(size_t lhs, size_t rhs) const { return lhs == rhs; }
+  bool operator()(size_t lhs, const LazyInt& rhs) const {
+    return lhs == rhs.value;
+  }
+};
+
+TEST(FlatHashMap, LazyKeyPattern) {
+  // Hashes are only guaranteed in opt mode; we use assertions to track
+  // internal state that can cause extra calls to hash.
+  int conversions = 0;
+  int hashes = 0;
+  flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes});
+
+  m[LazyInt(1, &conversions)] = 1;
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1)));
+  EXPECT_EQ(conversions, 1);
+#ifdef NDEBUG
+  EXPECT_EQ(hashes, 1);
+#endif
+
+  m[LazyInt(1, &conversions)] = 2;
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2)));
+  EXPECT_EQ(conversions, 1);
+#ifdef NDEBUG
+  EXPECT_EQ(hashes, 2);
+#endif
+
+  m.try_emplace(LazyInt(2, &conversions), 3);
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
+  EXPECT_EQ(conversions, 2);
+#ifdef NDEBUG
+  EXPECT_EQ(hashes, 3);
+#endif
+
+  m.try_emplace(LazyInt(2, &conversions), 4);
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
+  EXPECT_EQ(conversions, 2);
+#ifdef NDEBUG
+  EXPECT_EQ(hashes, 4);
+#endif
+}
+
+TEST(FlatHashMap, BitfieldArgument) {
+  union {
+    int n : 1;
+  };
+  n = 0;
+  flat_hash_map<int, int> m;
+  m.erase(n);
+  m.count(n);
+  m.prefetch(n);
+  m.find(n);
+  m.contains(n);
+  m.equal_range(n);
+  m.insert_or_assign(n, n);
+  m.insert_or_assign(m.end(), n, n);
+  m.try_emplace(n);
+  m.try_emplace(m.end(), n);
+  m.at(n);
+  m[n];
+}
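The `LazyKeyPattern` test above hinges on the `is_transparent` tag: defining it on both the hasher and the equality functor opts the container into heterogeneous lookup and insertion, deferring key conversion until a stored key must actually be materialized. A minimal recap of the shape such functors take (names invented):

    struct TransparentHash {
      using is_transparent = void;  // marker the containers look for
      size_t operator()(size_t v) const { return v; }
    };
    struct TransparentEq {
      using is_transparent = void;
      bool operator()(size_t a, size_t b) const { return a == b; }
    };
    // flat_hash_map<size_t, int, TransparentHash, TransparentEq> now accepts
    // any argument its hasher and equality functor accept, as LazyInt does above.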
+
+// We can't test mutable keys, or non-copyable keys with flat_hash_map.
+// Test that the nodes have the proper API.
+TEST(FlatHashMap, MergeExtractInsert) {
+  absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}};
+  auto node = m.extract(1);
+  EXPECT_TRUE(node);
+  EXPECT_EQ(node.key(), 1);
+  EXPECT_EQ(node.mapped(), 7);
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9)));
+
+  node.mapped() = 17;
+  m.insert(std::move(node));
+  EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9)));
+}
+
+#if !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__)
+TEST(FlatHashMap, Any) {
+  absl::flat_hash_map<int, absl::any> m;
+  m.emplace(1, 7);
+  auto it = m.find(1);
+  ASSERT_NE(it, m.end());
+  EXPECT_EQ(7, absl::any_cast<int>(it->second));
+
+  m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8));
+  it = m.find(2);
+  ASSERT_NE(it, m.end());
+  EXPECT_EQ(8, absl::any_cast<int>(it->second));
+
+  m.emplace(std::piecewise_construct, std::make_tuple(3),
+            std::make_tuple(absl::any(9)));
+  it = m.find(3);
+  ASSERT_NE(it, m.end());
+  EXPECT_EQ(9, absl::any_cast<int>(it->second));
+
+  struct H {
+    size_t operator()(const absl::any&) const { return 0; }
+  };
+  struct E {
+    bool operator()(const absl::any&, const absl::any&) const { return true; }
+  };
+  absl::flat_hash_map<absl::any, int, H, E> m2;
+  m2.emplace(1, 7);
+  auto it2 = m2.find(1);
+  ASSERT_NE(it2, m2.end());
+  EXPECT_EQ(7, it2->second);
+}
+#endif  // !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__)
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
new file mode 100644
index 00000000..b175b1bf
--- /dev/null
+++ b/absl/container/flat_hash_set.h
@@ -0,0 +1,491 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of set elements can be done
+// as an `O(1)` operation. However, `flat_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contain other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash set should be a set of type
+// `flat_hash_set`.
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
+#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+template <typename T>
+struct FlatHashSetPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set` is an unordered associative container which has
+// been optimized for both speed and memory footprint in most common use cases.
+// Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is provided a compatible heterogeneous hashing function and
+//   equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
+// fundamental and Abseil types that support the `absl::Hash` framework have a
+// compatible equality operator for comparing insertions into `flat_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
+// array to avoid memory indirection. Because a `flat_hash_set` is designed to
+// move data when rehashed, set keys will not retain pointer stability. If you
+// require pointer stability, consider using
+// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
+// you require pointer stability, consider `absl::node_hash_set` instead.
+//
+// Example:
+//
+//   // Create a flat hash set of three strings
+//   absl::flat_hash_set<std::string> ducks =
+//     {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the flat hash set
+//   ducks.insert("donald");
+//
+//   // Force a rehash of the flat hash set
+//   ducks.rehash(0);
+//
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!" << std::endl;
+//   }
+template <class T,
+          class Hash = absl::container_internal::hash_default_hash<T>,
+          class Eq = absl::container_internal::hash_default_eq<T>,
+          class Allocator = std::allocator<T>>
+class flat_hash_set
+    : public absl::container_internal::raw_hash_set<
+          absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq,
+          Allocator> {
+  using Base = typename flat_hash_set::raw_hash_set;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A flat_hash_set supports the same overload set as `std::unordered_set`
+  // for construction and assignment:
+  //
+  // *  Default constructor
+  //
+  //    // No allocation for the table's elements is made.
+  //    absl::flat_hash_set<std::string> set1;
+  //
+  // *  Initializer List constructor
+  //
+  //    absl::flat_hash_set<std::string> set2 =
+  //        {{"huey"}, {"dewey"}, {"louie"},};
+  //
+  // *  Copy constructor
+  //
+  //    absl::flat_hash_set<std::string> set3(set2);
+  //
+  // *  Copy assignment operator
+  //
+  //    // Hash functor and Comparator are copied as well
+  //    absl::flat_hash_set<std::string> set4;
+  //    set4 = set3;
+  //
+  // *  Move constructor
+  //
+  //    // Move is guaranteed efficient
+  //    absl::flat_hash_set<std::string> set5(std::move(set4));
+  //
+  // *  Move assignment operator
+  //
+  //    // May be efficient if allocators are compatible
+  //    absl::flat_hash_set<std::string> set6;
+  //    set6 = std::move(set5);
+  //
+  // *  Range constructor
+  //
+  //    std::vector<std::string> v = {"a", "b"};
+  //    absl::flat_hash_set<std::string> set7(v.begin(), v.end());
+  flat_hash_set() {}
+  using Base::Base;
+
+  // flat_hash_set::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_set`.
+  using Base::begin;
+
+  // flat_hash_set::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_set`.
+  using Base::cbegin;
+
+  // flat_hash_set::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_set`.
+  using Base::cend;
+
+  // flat_hash_set::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_set`.
+  using Base::end;
+
+  // flat_hash_set::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_set`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_set` and is
+  // not provided in the `std::unordered_set` API.
+  using Base::capacity;
+
+  // flat_hash_set::empty()
+  //
+  // Returns whether or not the `flat_hash_set` is empty.
+  using Base::empty;
+
+  // flat_hash_set::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `flat_hash_set` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_set`.
+  using Base::max_size;
+
+  // flat_hash_set::size()
+  //
+  // Returns the number of elements currently within the `flat_hash_set`.
+  using Base::size;
+
+  // flat_hash_set::clear()
+  //
+  // Removes all elements from the `flat_hash_set`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer call `erase(begin(), end())`.
+  using Base::clear;
+
+  // flat_hash_set::erase()
+  //
+  // Erases elements within the `flat_hash_set`. Erasing does not trigger a
+  // rehash. Overloads are listed below.
+  //
+  //   void erase(const_iterator pos):
+  //
+  //     Erases the element at `pos` of the `flat_hash_set`, returning `void`.
+  //
+  //     NOTE: this return behavior is different from that of STL containers
+  //     in general and `std::unordered_set` in particular.
+  //
+  //   iterator erase(const_iterator first, const_iterator last):
+  //
+  //     Erases the elements in the open interval [`first`, `last`), returning
+  //     an iterator pointing to `last`.
+  //
+  //   size_type erase(const key_type& key):
+  //
+  //     Erases the element with the matching key, if it exists.
+  using Base::erase;
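A compact sketch of the basic mutation API above (contents invented):

    absl::flat_hash_set<std::string> set = {"a", "b"};
    set.insert("c");   // set now holds "a", "b", "c"
    set.erase("a");    // erase-by-key; returns the number of elements removed
    set.clear();       // empties the set and may shrink the backing buffer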
+
+  // flat_hash_set::insert()
+  //
+  // Inserts an element of the specified value into the `flat_hash_set`,
+  // returning an iterator pointing to the newly inserted element, provided
+  // that an element with the given key does not already exist. If rehashing
+  // occurs due to the insertion, all iterators are invalidated. Overloads are
+  // listed below.
+  //
+  //   std::pair<iterator, bool> insert(const T& value):
+  //
+  //     Inserts a value into the `flat_hash_set`. Returns a pair consisting
+  //     of an iterator to the inserted element (or to the element that
+  //     prevented the insertion) and a bool denoting whether the insertion
+  //     took place.
+  //
+  //   std::pair<iterator, bool> insert(T&& value):
+  //
+  //     Inserts a moveable value into the `flat_hash_set`. Returns a pair
+  //     consisting of an iterator to the inserted element (or to the element
+  //     that prevented the insertion) and a bool denoting whether the
+  //     insertion took place.
+  //
+  //   iterator insert(const_iterator hint, const T& value):
+  //   iterator insert(const_iterator hint, T&& value):
+  //
+  //     Inserts a value, using the position of `hint` as a non-binding
+  //     suggestion for where to begin the insertion search. Returns an
+  //     iterator to the inserted element, or to the existing element that
+  //     prevented the insertion.
+  //
+  //   void insert(InputIterator first, InputIterator last):
+  //
+  //     Inserts a range of values [`first`, `last`).
+  //
+  //     NOTE: Although the STL does not specify which element may be inserted
+  //     if multiple keys compare equivalently, for `flat_hash_set` we
+  //     guarantee the first match is inserted.
+  //
+  //   void insert(std::initializer_list<T> ilist):
+  //
+  //     Inserts the elements within the initializer list `ilist`.
+  //
+  //     NOTE: Although the STL does not specify which element may be inserted
+  //     if multiple keys compare equivalently within the initializer list,
+  //     for `flat_hash_set` we guarantee the first match is inserted.
+  using Base::insert;
+
+  // flat_hash_set::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_set`, provided that no element with the given key
+  // already exists.
+  //
+  // The element may be constructed even if there already is an element with
+  // the key in the container, in which case the newly constructed element
+  // will be destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace;
+
+  // flat_hash_set::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_set`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search, and only inserts
+  // provided that no element with the given key already exists.
+  //
+  // The element may be constructed even if there already is an element with
+  // the key in the container, in which case the newly constructed element
+  // will be destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace_hint;
+
+  // flat_hash_set::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  //   node_type extract(const_iterator position):
+  //
+  //     Extracts the element at the indicated position and returns a node
+  //     handle owning that extracted data.
+  //
+  //   node_type extract(const key_type& x):
+  //
+  //     Extracts the element with the key matching the passed key value and
+  //     returns a node handle owning that extracted data. If the
+  //     `flat_hash_set` does not contain an element with a matching key, this
+  //     function returns an empty node handle.
+  using Base::extract;
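The node handles returned by `extract()` can be re-inserted into another set; a sketch of the round trip (the `insert(node_type&&)` overload used here is exercised by the tests later in this diff):

    absl::flat_hash_set<int> s1 = {1, 2};
    absl::flat_hash_set<int> s2;
    auto node = s1.extract(1);                // s1 == {2}; node owns the value 1
    auto result = s2.insert(std::move(node));
    // result.inserted is true, result.position points at 1 within s2, and
    // result.node is an empty handle because the insertion consumed it.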
+
+  // flat_hash_set::merge()
+  //
+  // Extracts elements from a given `source` flat hash set into this
+  // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_set::swap(flat_hash_set& other)
+  //
+  // Exchanges the contents of this `flat_hash_set` with those of the `other`
+  // flat hash set, avoiding invocation of any move, copy, or swap operations
+  // on individual elements.
+  //
+  // All iterators and references on the `flat_hash_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_set::rehash(count)
+  //
+  // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor
+  // more than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_set`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_set::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_set` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_set::contains()
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+  using Base::contains;
+
+  // flat_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or
+  // `0` since duplicate elements are not allowed within a `flat_hash_set`.
+  using Base::count;
+
+  // flat_hash_set::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_set`.
+  using Base::equal_range;
+
+  // flat_hash_set::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_set`.
+  using Base::find;
+
+  // flat_hash_set::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_set`. Note that
+  // because a flat hash set contains all elements within its internal
+  // storage, this value simply equals the current capacity of the
+  // `flat_hash_set`.
+  using Base::bucket_count;
+
+  // flat_hash_set::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_set` (the average
+  // number of slots occupied with a value within the hash set).
+  using Base::load_factor;
+
+  // flat_hash_set::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_set`. Overloads are
+  // listed below.
+  //
+  //   float flat_hash_set::max_load_factor()
+  //
+  //     Returns the current maximum load factor of the `flat_hash_set`.
+  //
+  //   void flat_hash_set::max_load_factor(float ml)
+  //
+  //     Sets the maximum load factor of the `flat_hash_set` to the passed
+  //     value.
+  //
+  //     NOTE: This overload is provided only for API compatibility with the
+  //     STL; `flat_hash_set` will ignore any set load factor and manage its
+  //     rehashing internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_set::get_allocator()
+  //
+  // Returns the allocator function associated with this `flat_hash_set`.
+  using Base::get_allocator;
+
+  // flat_hash_set::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_set`.
+  using Base::hash_function;
+
+  // flat_hash_set::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+};
+
+namespace container_internal {
+
+template <typename T>
+struct FlatHashSetPolicy {
+  using slot_type = T;
+  using key_type = T;
+  using init_type = T;
+  using constant_iterators = std::true_type;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    absl::allocator_traits<Allocator>::construct(*alloc, slot,
+                                                 std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(*old_slot));
+    destroy(alloc, old_slot);
+  }
+
+  static T& element(slot_type* slot) { return *slot; }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposeValue(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposeValue(
+        std::forward<F>(f), std::forward<Args>(args)...);
+  }
+
+  static size_t space_used(const T*) { return 0; }
+};
+}  // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
+    : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FLAT_HASH_SET_H_
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
new file mode 100644
index 00000000..cabc2b59
--- /dev/null
+++ b/absl/container/flat_hash_set_test.cc
@@ -0,0 +1,128 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_set.h"
+
+#include <vector>
+
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::Pointee;
+using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
+
+template <class T>
+using Set =
+    absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
+
+using SetTypes =
+    ::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>;
+
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ModifiersTest, SetTypes);
+
+TEST(FlatHashSet, EmplaceString) {
+  std::vector<std::string> v = {"a", "b"};
+  absl::flat_hash_set<absl::string_view> hs(v.begin(), v.end());
+  EXPECT_THAT(hs, UnorderedElementsAreArray(v));
+}
+
+TEST(FlatHashSet, BitfieldArgument) {
+  union {
+    int n : 1;
+  };
+  n = 0;
+  absl::flat_hash_set<int> s = {n};
+  s.insert(n);
+  s.insert(s.end(), n);
+  s.insert({n});
+  s.erase(n);
+  s.count(n);
+  s.prefetch(n);
+  s.find(n);
+  s.contains(n);
+  s.equal_range(n);
+}
+
+TEST(FlatHashSet, MergeExtractInsert) {
+  struct Hash {
+    size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
+  };
+  struct Eq {
+    bool operator()(const std::unique_ptr<int>& a,
+                    const std::unique_ptr<int>& b) const {
+      return *a == *b;
+    }
+  };
+  absl::flat_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
+  set1.insert(absl::make_unique<int>(7));
+  set1.insert(absl::make_unique<int>(17));
+
+  set2.insert(absl::make_unique<int>(7));
+  set2.insert(absl::make_unique<int>(19));
+
+  EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
+  EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
+
+  set1.merge(set2);
+
+  EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
+  EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+  auto node = set1.extract(absl::make_unique<int>(7));
+  EXPECT_TRUE(node);
+  EXPECT_THAT(node.value(), Pointee(7));
+  EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
+
+  auto insert_result = set2.insert(std::move(node));
+  EXPECT_FALSE(node);
+  EXPECT_FALSE(insert_result.inserted);
+  EXPECT_TRUE(insert_result.node);
+  EXPECT_THAT(insert_result.node.value(), Pointee(7));
+  EXPECT_EQ(**insert_result.position, 7);
+  EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
+  EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+  node = set1.extract(absl::make_unique<int>(17));
+  EXPECT_TRUE(node);
+  EXPECT_THAT(node.value(), Pointee(17));
+  EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
+
+  node.value() = absl::make_unique<int>(23);
+
+  insert_result = set2.insert(std::move(node));
+  EXPECT_FALSE(node);
+  EXPECT_TRUE(insert_result.inserted);
+  EXPECT_FALSE(insert_result.node);
+  EXPECT_EQ(**insert_result.position, 23);
+  EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 183ade54..37714baf 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -1,4 +1,4 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2018 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -20,17 +20,17 @@
 // vector" which behaves in an equivalent fashion to a `std::vector`, except
 // that storage for small sequences of the vector are provided inline without
 // requiring any heap allocation.
-
-// An `absl::InlinedVector` specifies the size N at which to inline as one
-// of its template parameters. Vectors of length <= N are provided inline.
-// Typically N is very small (e.g., 4) so that sequences that are expected to be
-// short do not require allocations.
-
-// An `absl::InlinedVector` does not usually require a specific allocator; if
+//
+// An `absl::InlinedVector` specifies the default capacity `N` as one of
+// its template parameters. Instances where `size() <= N` hold contained
+// elements in inline space. Typically `N` is very small so that sequences that
+// are expected to be short do not require allocations.
+//
+// An `absl::InlinedVector` does not usually require a specific allocator. If
 // the inlined vector grows beyond its initial constraints, it will need to
-// allocate (as any normal `std::vector` would) and it will generally use the
-// default allocator in that case; optionally, a custom allocator may be
-// specified using an `absl::InlinedVector` construction.
+// allocate (as any normal `std::vector` would). This is usually performed with
+// the default allocator (defined as `std::allocator<T>`). Optionally, a custom
+// allocator type may be specified as `A` in `absl::InlinedVector<T, N, A>`.
 
 #ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
 #define ABSL_CONTAINER_INLINED_VECTOR_H_
@@ -53,7 +53,7 @@
 #include "absl/memory/memory.h"
 
 namespace absl {
-inline namespace lts_2018_06_20 {
+inline namespace lts_2018_12_18 {
 
 // -----------------------------------------------------------------------------
 // InlinedVector
@@ -62,12 +62,30 @@ inline namespace lts_2018_06_20 {
 // An `absl::InlinedVector` is designed to be a drop-in replacement for
 // `std::vector` for use cases where the vector's size is sufficiently small
 // that it can be inlined. If the inlined vector does grow beyond its estimated
-// size, it will trigger an initial allocation on the heap, and will behave as a
-// `std:vector`. The API of the `absl::InlinedVector` within this file is
+// capacity, it will trigger an initial allocation on the heap, and will behave
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
 // designed to cover the same API footprint as covered by `std::vector`.
-template <typename T, size_t N, typename A = std::allocator<T> >
+template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
-  using AllocatorTraits = std::allocator_traits<A>;
+  static_assert(
+      N > 0, "InlinedVector requires inline capacity greater than 0");
+  constexpr static typename A::size_type inlined_capacity() {
+    return static_cast<typename A::size_type>(N);
+  }
+
+  template <typename Iterator>
+  using DisableIfIntegral =
+      absl::enable_if_t<!std::is_integral<Iterator>::value>;
+
+  template <typename Iterator>
+  using EnableIfInputIterator = absl::enable_if_t<std::is_convertible<
+      typename std::iterator_traits<Iterator>::iterator_category,
+      std::input_iterator_tag>::value>;
+
+  template <typename Iterator>
+  using IteratorCategory =
+      typename std::iterator_traits<Iterator>::iterator_category;
+
+  using rvalue_reference = typename A::value_type&&;
 
  public:
   using allocator_type = A;
@@ -83,51 +101,64 @@ class InlinedVector {
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
+  // ---------------------------------------------------------------------------
+  // InlinedVector Constructors and Destructor
+  // ---------------------------------------------------------------------------
+
+  // Creates an empty inlined vector with a default initialized allocator.
   InlinedVector() noexcept(noexcept(allocator_type()))
       : allocator_and_tag_(allocator_type()) {}
 
+  // Creates an empty inlined vector with a specified allocator.
   explicit InlinedVector(const allocator_type& alloc) noexcept
       : allocator_and_tag_(alloc) {}
 
-  // Create a vector with n copies of value_type().
-  explicit InlinedVector(size_type n) : allocator_and_tag_(allocator_type()) {
+  // Creates an inlined vector with `n` copies of `value_type()`.
+  explicit InlinedVector(size_type n,
+                         const allocator_type& alloc = allocator_type())
+      : allocator_and_tag_(alloc) {
     InitAssign(n);
   }
 
-  // Create a vector with n copies of elem
-  InlinedVector(size_type n, const value_type& elem,
+  // Creates an inlined vector with `n` copies of `v`.
+  InlinedVector(size_type n, const_reference v,
                 const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
-    InitAssign(n, elem);
+    InitAssign(n, v);
  }
 
-  // Create and initialize with the elements [first .. last).
-  // The unused enable_if argument restricts this constructor so that it is
-  // elided when value_type is an integral type. This prevents ambiguous
-  // interpretation between a call to this constructor with two integral
-  // arguments and a call to the preceding (n, elem) constructor.
-  template <typename InputIterator>
-  InlinedVector(
-      InputIterator first, InputIterator last,
-      const allocator_type& alloc = allocator_type(),
-      typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
-          nullptr)
+  // Creates an inlined vector of copies of the values in `init_list`.
+  InlinedVector(std::initializer_list<value_type> init_list,
+                const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
-    AppendRange(first, last);
+    AppendRange(init_list.begin(), init_list.end(),
+                IteratorCategory<decltype(init_list.begin())>{});
   }
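To make the inline-capacity semantics concrete, a short sketch (sizes invented):

    absl::InlinedVector<int, 4> v = {1, 2, 3};
    // size() == 3 <= N == 4, so elements live in inline space; capacity() == 4.
    v.push_back(4);  // still inline
    v.push_back(5);  // exceeds N: storage moves to a heap allocation,
                     // and capacity() now reports the allocated capacity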
 
-  InlinedVector(std::initializer_list<value_type> init,
+  // Creates an inlined vector with elements constructed from the provided
+  // Iterator range [`first`, `last`).
+  //
+  // NOTE: The `enable_if` prevents ambiguous interpretation between a call to
+  // this constructor with two integral arguments and a call to the above
+  // `InlinedVector(size_type, const_reference)` constructor.
+  template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+  InlinedVector(InputIterator first, InputIterator last,
                 const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
-    AppendRange(init.begin(), init.end());
+    AppendRange(first, last, IteratorCategory<InputIterator>{});
   }
 
-  InlinedVector(const InlinedVector& v);
-  InlinedVector(const InlinedVector& v, const allocator_type& alloc);
+  // Creates a copy of `other` using `other`'s allocator.
+  InlinedVector(const InlinedVector& other);
 
-  // This move constructor does not allocate and only moves the underlying
+  // Creates a copy of `other` but with a specified allocator.
+  InlinedVector(const InlinedVector& other, const allocator_type& alloc);
+
+  // Creates an inlined vector by moving in the contents of `other`.
+  //
+  // NOTE: This move constructor does not allocate and only moves the underlying
   // objects, so its `noexcept` specification depends on whether moving the
-  // underlying objects can throw or not. We assume
+  // underlying objects can throw or not. We assume:
   // a) move constructors should only throw due to allocation failure and
   // b) if `value_type`'s move constructor allocates, it uses the same
   // allocation function as the `InlinedVector`'s allocator, so the move
@@ -137,408 +168,422 @@ class InlinedVector {
       absl::allocator_is_nothrow<allocator_type>::value ||
       std::is_nothrow_move_constructible<value_type>::value);
 
-  // This move constructor allocates and also moves the underlying objects, so
-  // its `noexcept` specification depends on whether the allocation can throw
-  // and whether moving the underlying objects can throw. Based on the same
-  // assumptions above, the `noexcept` specification is dominated by whether the
-  // allocation can throw regardless of whether `value_type`'s move constructor
-  // is specified as `noexcept`.
+  // Creates an inlined vector by moving in the contents of `other`.
+  //
+  // NOTE: This move constructor allocates and subsequently moves the underlying
+  // objects, so its `noexcept` specification depends on whether the allocation
+  // can throw and whether moving the underlying objects can throw. Based on the
+  // same assumptions as above, the `noexcept` specification is dominated by
+  // whether the allocation can throw regardless of whether `value_type`'s move
+  // constructor is specified as `noexcept`.
   InlinedVector(InlinedVector&& v, const allocator_type& alloc) noexcept(
       absl::allocator_is_nothrow<allocator_type>::value);
 
   ~InlinedVector() { clear(); }
 
-  InlinedVector& operator=(const InlinedVector& v) {
-    if (this == &v) {
-      return *this;
-    }
-    // Optimized to avoid reallocation.
-    // Prefer reassignment to copy construction for elements.
-    if (size() < v.size()) {  // grow
-      reserve(v.size());
-      std::copy(v.begin(), v.begin() + size(), begin());
-      std::copy(v.begin() + size(), v.end(), std::back_inserter(*this));
-    } else {  // maybe shrink
-      erase(begin() + v.size(), end());
-      std::copy(v.begin(), v.end(), begin());
-    }
-    return *this;
-  }
-
-  InlinedVector& operator=(InlinedVector&& v) {
-    if (this == &v) {
-      return *this;
-    }
-    if (v.allocated()) {
-      clear();
-      tag().set_allocated_size(v.size());
-      init_allocation(v.allocation());
-      v.tag() = Tag();
-    } else {
-      if (allocated()) clear();
-      // Both are inlined now.
-      if (size() < v.size()) {
-        auto mid = std::make_move_iterator(v.begin() + size());
-        std::copy(std::make_move_iterator(v.begin()), mid, begin());
-        UninitializedCopy(mid, std::make_move_iterator(v.end()), end());
-      } else {
-        auto new_end = std::copy(std::make_move_iterator(v.begin()),
-                                 std::make_move_iterator(v.end()), begin());
-        Destroy(new_end, end());
-      }
-      tag().set_inline_size(v.size());
-    }
-    return *this;
-  }
-
-  InlinedVector& operator=(std::initializer_list<value_type> init) {
-    AssignRange(init.begin(), init.end());
-    return *this;
-  }
+  // ---------------------------------------------------------------------------
+  // InlinedVector Member Accessors
+  // ---------------------------------------------------------------------------
 
-  // InlinedVector::assign()
+  // `InlinedVector::empty()`
   //
-  // Replaces the contents of the inlined vector with copies of those in the
-  // iterator range [first, last).
-  template <typename InputIterator>
-  void assign(
-      InputIterator first, InputIterator last,
-      typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
-          nullptr) {
-    AssignRange(first, last);
-  }
-
-  // Overload of `InlinedVector::assign()` to take values from elements of an
-  // initializer list
-  void assign(std::initializer_list<value_type> init) {
-    AssignRange(init.begin(), init.end());
-  }
-
-  // Overload of `InlinedVector::assign()` to replace the first `n` elements of
-  // the inlined vector with `elem` values.
-  void assign(size_type n, const value_type& elem) {
-    if (n <= size()) {  // Possibly shrink
-      std::fill_n(begin(), n, elem);
-      erase(begin() + n, end());
-      return;
-    }
-    // Grow
-    reserve(n);
-    std::fill_n(begin(), size(), elem);
-    if (allocated()) {
-      UninitializedFill(allocated_space() + size(), allocated_space() + n,
-                        elem);
-      tag().set_allocated_size(n);
-    } else {
-      UninitializedFill(inlined_space() + size(), inlined_space() + n, elem);
-      tag().set_inline_size(n);
-    }
-  }
+  // Checks if the inlined vector has no elements.
+  bool empty() const noexcept { return !size(); }
 
-  // InlinedVector::size()
+  // `InlinedVector::size()`
   //
   // Returns the number of elements in the inlined vector.
   size_type size() const noexcept { return tag().size(); }
 
-  // InlinedVector::empty()
-  //
-  // Checks if the inlined vector has no elements.
-  bool empty() const noexcept { return (size() == 0); }
-
-  // InlinedVector::capacity()
-  //
-  // Returns the number of elements that can be stored in an inlined vector
-  // without requiring a reallocation of underlying memory. Note that for
-  // most inlined vectors, `capacity()` should equal its initial size `N`; for
-  // inlined vectors which exceed this capacity, they will no longer be inlined,
-  // and `capacity()` will equal its capacity on the allocated heap.
-  size_type capacity() const noexcept {
-    return allocated() ? allocation().capacity() : N;
-  }
-
-  // InlinedVector::max_size()
+  // `InlinedVector::max_size()`
   //
   // Returns the maximum number of elements the vector can hold.
   size_type max_size() const noexcept {
     // One bit of the size storage is used to indicate whether the inlined
-    // vector is allocated; as a result, the maximum size of the container that
-    // we can express is half of the max for our size type.
-    return std::numeric_limits<size_type>::max() / 2;
+    // vector is allocated. As a result, the maximum size of the container that
+    // we can express is half of the max for `size_type`.
+    return (std::numeric_limits<size_type>::max)() / 2;
   }
 
-  // InlinedVector::data()
+  // `InlinedVector::capacity()`
   //
-  // Returns a const T* pointer to elements of the inlined vector. This pointer
-  // can be used to access (but not modify) the contained elements.
-  // Only results within the range `[0,size())` are defined.
-  const_pointer data() const noexcept {
-    return allocated() ? allocated_space() : inlined_space();
+  // Returns the number of elements that can be stored in the inlined vector
+  // without requiring a reallocation of underlying memory.
+  //
+  // NOTE: For most inlined vectors, `capacity()` should equal
+  // `inlined_capacity()`. For inlined vectors which exceed this capacity, they
+  // will no longer be inlined and `capacity()` will equal its capacity on the
+  // allocated heap.
+  size_type capacity() const noexcept {
+    return allocated() ? allocation().capacity() : inlined_capacity();
   }
 
-  // Overload of InlinedVector::data() to return a T* pointer to elements of the
-  // inlined vector. This pointer can be used to access and modify the contained
-  // elements.
+  // `InlinedVector::data()`
+  //
+  // Returns a `pointer` to elements of the inlined vector. This pointer can be
+  // used to access and modify the contained elements.
+  // Only results within the range [`0`, `size()`) are defined.
   pointer data() noexcept {
     return allocated() ? allocated_space() : inlined_space();
   }
 
-  // InlinedVector::clear()
-  //
-  // Removes all elements from the inlined vector.
-  void clear() noexcept {
-    size_type s = size();
-    if (allocated()) {
-      Destroy(allocated_space(), allocated_space() + s);
-      allocation().Dealloc(allocator());
-    } else if (s != 0) {  // do nothing for empty vectors
-      Destroy(inlined_space(), inlined_space() + s);
-    }
-    tag() = Tag();
+  // Overload of `InlinedVector::data()` to return a `const_pointer` to elements
+  // of the inlined vector. This pointer can be used to access (but not modify)
+  // the contained elements.
+  const_pointer data() const noexcept {
+    return allocated() ? allocated_space() : inlined_space();
   }
 
-  // InlinedVector::at()
+  // `InlinedVector::operator[]()`
   //
-  // Returns the ith element of an inlined vector.
-  const value_type& at(size_type i) const {
-    if (ABSL_PREDICT_FALSE(i >= size())) {
-      base_internal::ThrowStdOutOfRange(
-          "InlinedVector::at failed bounds check");
-    }
+  // Returns a `reference` to the `i`th element of the inlined vector using the
+  // array operator.
+  reference operator[](size_type i) {
+    assert(i < size());
     return data()[i];
   }
 
-  // InlinedVector::operator[]
-  //
-  // Returns the ith element of an inlined vector using the array operator.
-  const value_type& operator[](size_type i) const {
+  // Overload of `InlinedVector::operator[]()` to return a `const_reference` to
+  // the `i`th element of the inlined vector.
+  const_reference operator[](size_type i) const {
     assert(i < size());
     return data()[i];
   }
 
-  // Overload of InlinedVector::at() to return the ith element of an inlined
-  // vector.
-  value_type& at(size_type i) {
-    if (i >= size()) {
+  // `InlinedVector::at()`
+  //
+  // Returns a `reference` to the `i`th element of the inlined vector.
+  reference at(size_type i) {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange(
-          "InlinedVector::at failed bounds check");
+          "InlinedVector::at() failed bounds check");
     }
     return data()[i];
   }
 
-  // Overload of InlinedVector::operator[] to return the ith element of an
-  // inlined vector.
-  value_type& operator[](size_type i) {
-    assert(i < size());
-    return data()[i];
-  }
-
+  // Overload of `InlinedVector::at()` to return a `const_reference` to the
+  // `i`th element of the inlined vector.
+  const_reference at(size_type i) const {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange(
+          "InlinedVector::at() failed bounds check");
+    }
     return data()[i];
   }
 
-  // InlinedVector::back()
-  //
-  // Returns a reference to the last element of an inlined vector.
-  value_type& back() {
-    assert(!empty());
-    return at(size() - 1);
-  }
-
-  // Overload of InlinedVector::back() returns a reference to the last element
-  // of an inlined vector of const values.
-  const value_type& back() const {
-    assert(!empty());
-    return at(size() - 1);
-  }
-
-  // InlinedVector::front()
+  // `InlinedVector::front()`
   //
-  // Returns a reference to the first element of an inlined vector.
-  value_type& front() {
+  // Returns a `reference` to the first element of the inlined vector.
+  reference front() {
    assert(!empty());
     return at(0);
   }
 
-  // Overload of InlinedVector::front() returns a reference to the first element
-  // of an inlined vector of const values.
-  const value_type& front() const {
+  // Overload of `InlinedVector::front()` returns a `const_reference` to the
+  // first element of the inlined vector.
+  const_reference front() const {
     assert(!empty());
     return at(0);
   }
 
-  // InlinedVector::emplace_back()
+  // `InlinedVector::back()`
   //
-  // Constructs and appends an object to the inlined vector.
-  //
-  // Returns a reference to the inserted element.
-  template <typename... Args>
-  value_type& emplace_back(Args&&... args) {
-    size_type s = size();
-    assert(s <= capacity());
-    if (ABSL_PREDICT_FALSE(s == capacity())) {
-      return GrowAndEmplaceBack(std::forward<Args>(args)...);
-    }
-    assert(s < capacity());
-
-    value_type* space;
-    if (allocated()) {
-      tag().set_allocated_size(s + 1);
-      space = allocated_space();
-    } else {
-      tag().set_inline_size(s + 1);
-      space = inlined_space();
-    }
-    return Construct(space + s, std::forward<Args>(args)...);
+  // Returns a `reference` to the last element of the inlined vector.
+  reference back() {
+    assert(!empty());
+    return at(size() - 1);
   }
 
-  // InlinedVector::push_back()
-  //
-  // Appends a const element to the inlined vector.
-  void push_back(const value_type& t) { emplace_back(t); }
-
-  // Overload of InlinedVector::push_back() to append a move-only element to the
-  // inlined vector.
-  void push_back(value_type&& t) { emplace_back(std::move(t)); }
-
-  // InlinedVector::pop_back()
-  //
-  // Removes the last element (which is destroyed) in the inlined vector.
-  void pop_back() {
+  // Overload of `InlinedVector::back()` to return a `const_reference` to the
+  // last element of the inlined vector.
+  const_reference back() const {
     assert(!empty());
-    size_type s = size();
-    if (allocated()) {
-      Destroy(allocated_space() + s - 1, allocated_space() + s);
-      tag().set_allocated_size(s - 1);
-    } else {
-      Destroy(inlined_space() + s - 1, inlined_space() + s);
-      tag().set_inline_size(s - 1);
-    }
+    return at(size() - 1);
   }
 
-  // InlinedVector::resize()
+  // `InlinedVector::begin()`
   //
-  // Resizes the inlined vector to contain `n` elements. If `n` is smaller than
-  // the inlined vector's current size, extra elements are destroyed. If `n` is
-  // larger than the initial size, new elements are value-initialized.
-  void resize(size_type n);
-
-  // Overload of InlinedVector::resize() to resize the inlined vector to contain
-  // `n` elements. If `n` is larger than the current size, enough copies of
-  // `elem` are appended to increase its size to `n`.
-  void resize(size_type n, const value_type& elem);
-
-  // InlinedVector::begin()
-  //
-  // Returns an iterator to the beginning of the inlined vector.
+  // Returns an `iterator` to the beginning of the inlined vector.
   iterator begin() noexcept { return data(); }
 
-  // Overload of InlinedVector::begin() for returning a const iterator to the
-  // beginning of the inlined vector.
+  // Overload of `InlinedVector::begin()` to return a `const_iterator` to
+  // the beginning of the inlined vector.
   const_iterator begin() const noexcept { return data(); }
 
-  // InlinedVector::cbegin()
-  //
-  // Returns a const iterator to the beginning of the inlined vector.
-  const_iterator cbegin() const noexcept { return begin(); }
-
-  // InlinedVector::end()
+  // `InlinedVector::end()`
   //
-  // Returns an iterator to the end of the inlined vector.
+  // Returns an `iterator` to the end of the inlined vector.
   iterator end() noexcept { return data() + size(); }
 
-  // Overload of InlinedVector::end() for returning a const iterator to the end
-  // of the inlined vector.
+  // Overload of `InlinedVector::end()` to return a `const_iterator` to the
+  // end of the inlined vector.
   const_iterator end() const noexcept { return data() + size(); }
 
-  // InlinedVector::cend()
+  // `InlinedVector::cbegin()`
+  //
+  // Returns a `const_iterator` to the beginning of the inlined vector.
+  const_iterator cbegin() const noexcept { return begin(); }
+
+  // `InlinedVector::cend()`
   //
-  // Returns a const iterator to the end of the inlined vector.
+  // Returns a `const_iterator` to the end of the inlined vector.
   const_iterator cend() const noexcept { return end(); }
 
-  // InlinedVector::rbegin()
+  // `InlinedVector::rbegin()`
   //
-  // Returns a reverse iterator from the end of the inlined vector.
+  // Returns a `reverse_iterator` from the end of the inlined vector.
   reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
 
-  // Overload of InlinedVector::rbegin() for returning a const reverse iterator
-  // from the end of the inlined vector.
+  // Overload of `InlinedVector::rbegin()` to return a
+  // `const_reverse_iterator` from the end of the inlined vector.
   const_reverse_iterator rbegin() const noexcept {
     return const_reverse_iterator(end());
   }
 
-  // InlinedVector::crbegin()
+  // `InlinedVector::rend()`
   //
-  // Returns a const reverse iterator from the end of the inlined vector.
-  const_reverse_iterator crbegin() const noexcept { return rbegin(); }
-
-  // InlinedVector::rend()
-  //
-  // Returns a reverse iterator from the beginning of the inlined vector.
+  // Returns a `reverse_iterator` from the beginning of the inlined vector.
   reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
 
-  // Overload of InlinedVector::rend() for returning a const reverse iterator
+  // Overload of `InlinedVector::rend()` to return a `const_reverse_iterator`
   // from the beginning of the inlined vector.
   const_reverse_iterator rend() const noexcept {
     return const_reverse_iterator(begin());
   }
 
-  // InlinedVector::crend()
+  // `InlinedVector::crbegin()`
   //
-  // Returns a reverse iterator from the beginning of the inlined vector.
+  // Returns a `const_reverse_iterator` from the end of the inlined vector.
+  const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+  // `InlinedVector::crend()`
+  //
+  // Returns a `const_reverse_iterator` from the beginning of the inlined
+  // vector.
   const_reverse_iterator crend() const noexcept { return rend(); }
 
-  // InlinedVector::emplace()
+  // `InlinedVector::get_allocator()`
   //
-  // Constructs and inserts an object to the inlined vector at the given
-  // `position`, returning an iterator pointing to the newly emplaced element.
-  template <typename... Args>
-  iterator emplace(const_iterator position, Args&&... args);
+  // Returns a copy of the allocator of the inlined vector.
+  allocator_type get_allocator() const { return allocator(); }
+
+  // ---------------------------------------------------------------------------
+  // InlinedVector Member Mutators
+  // ---------------------------------------------------------------------------
+
+  // `InlinedVector::operator=()`
+  //
+  // Replaces the contents of the inlined vector with copies of the elements in
+  // the provided `std::initializer_list`.
+  InlinedVector& operator=(std::initializer_list<value_type> init_list) {
+    AssignRange(init_list.begin(), init_list.end(),
+                IteratorCategory<decltype(init_list.begin())>{});
+    return *this;
+  }
 
-  // InlinedVector::insert()
+  // Overload of `InlinedVector::operator=()` to replace the contents of the
+  // inlined vector with the contents of `other`.
+  InlinedVector& operator=(const InlinedVector& other) {
+    if (ABSL_PREDICT_FALSE(this == &other)) return *this;
+
+    // Optimized to avoid reallocation.
+    // Prefer reassignment to copy construction for elements.
+    if (size() < other.size()) {  // grow
+      reserve(other.size());
+      std::copy(other.begin(), other.begin() + size(), begin());
+      std::copy(other.begin() + size(), other.end(), std::back_inserter(*this));
+    } else {  // maybe shrink
+      erase(begin() + other.size(), end());
+      std::copy(other.begin(), other.end(), begin());
+    }
+    return *this;
+  }
+
+  // Overload of `InlinedVector::operator=()` to replace the contents of the
+  // inlined vector with the contents of `other`.
   //
-  // Inserts an element of the specified value at `position`, returning an
-  // iterator pointing to the newly inserted element.
-  iterator insert(const_iterator position, const value_type& v) {
+  // NOTE: As a result of calling this overload, `other` may be empty or its
+  // contents may be left in a moved-from state.
+  InlinedVector& operator=(InlinedVector&& other) {
+    if (ABSL_PREDICT_FALSE(this == &other)) return *this;
+
+    if (other.allocated()) {
+      clear();
+      tag().set_allocated_size(other.size());
+      init_allocation(other.allocation());
+      other.tag() = Tag();
+    } else {
+      if (allocated()) clear();
+      // Both are inlined now.
+      if (size() < other.size()) {
+        auto mid = std::make_move_iterator(other.begin() + size());
+        std::copy(std::make_move_iterator(other.begin()), mid, begin());
+        UninitializedCopy(mid, std::make_move_iterator(other.end()), end());
+      } else {
+        auto new_end = std::copy(std::make_move_iterator(other.begin()),
+                                 std::make_move_iterator(other.end()), begin());
+        Destroy(new_end, end());
+      }
+      tag().set_inline_size(other.size());
+    }
+    return *this;
+  }
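A short illustration of the move-assignment NOTE above (contents invented): the moved-from vector remains valid but unspecified.

    absl::InlinedVector<std::string, 2> a = {"x", "y"};
    absl::InlinedVector<std::string, 2> b;
    b = std::move(a);
    // b == {"x", "y"}. a may be empty (allocated case) or hold moved-from
    // strings (inline case), so only assign to, clear, or destroy a afterwards.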
+ void assign(size_type n, const_reference v) { + if (n <= size()) { // Possibly shrink + std::fill_n(begin(), n, v); + erase(begin() + n, end()); + return; + } + // Grow + reserve(n); + std::fill_n(begin(), size(), v); + if (allocated()) { + UninitializedFill(allocated_space() + size(), allocated_space() + n, v); + tag().set_allocated_size(n); + } else { + UninitializedFill(inlined_space() + size(), inlined_space() + n, v); + tag().set_inline_size(n); + } + } + + // Overload of `InlinedVector::assign()` to replace the contents of the + // inlined vector with copies of the values in the provided + // `std::initializer_list`. + void assign(std::initializer_list init_list) { + AssignRange(init_list.begin(), init_list.end(), + IteratorCategory{}); + } + + // Overload of `InlinedVector::assign()` to replace the contents of the + // inlined vector with values constructed from the range [`first`, `last`). + template * = nullptr> + void assign(InputIterator first, InputIterator last) { + AssignRange(first, last, IteratorCategory{}); + } + + // `InlinedVector::resize()` + // + // Resizes the inlined vector to contain `n` elements. If `n` is smaller than + // the inlined vector's current size, extra elements are destroyed. If `n` is + // larger than the initial size, new elements are value-initialized. + void resize(size_type n); + + // Overload of `InlinedVector::resize()` to resize the inlined vector to + // contain `n` elements where, if `n` is larger than `size()`, the new values + // will be copy-constructed from `v`. + void resize(size_type n, const_reference v); + + // `InlinedVector::insert()` + // + // Copies `v` into `position`, returning an `iterator` pointing to the newly + // inserted element. + iterator insert(const_iterator position, const_reference v) { return emplace(position, v); } - // Overload of InlinedVector::insert() for inserting an element of the - // specified rvalue, returning an iterator pointing to the newly inserted - // element. - iterator insert(const_iterator position, value_type&& v) { + // Overload of `InlinedVector::insert()` for moving `v` into `position`, + // returning an iterator pointing to the newly inserted element. + iterator insert(const_iterator position, rvalue_reference v) { return emplace(position, std::move(v)); } - // Overload of InlinedVector::insert() for inserting `n` elements of the - // specified value at `position`, returning an iterator pointing to the first + // Overload of `InlinedVector::insert()` for inserting `n` contiguous copies + // of `v` starting at `position`. Returns an `iterator` pointing to the first // of the newly inserted elements. - iterator insert(const_iterator position, size_type n, const value_type& v) { + iterator insert(const_iterator position, size_type n, const_reference v) { return InsertWithCount(position, n, v); } - // Overload of `InlinedVector::insert()` to disambiguate the two - // three-argument overloads of `insert()`, returning an iterator pointing to - // the first of the newly inserted elements. + // Overload of `InlinedVector::insert()` for copying the contents of the + // `std::initializer_list` into the vector starting at `position`. Returns an + // `iterator` pointing to the first of the newly inserted elements. + iterator insert(const_iterator position, + std::initializer_list init_list) { + return insert(position, init_list.begin(), init_list.end()); + } + + // Overload of `InlinedVector::insert()` for inserting elements constructed + // from the range [`first`, `last`). 
Returns an `iterator` pointing to the + // first of the newly inserted elements. + // + // NOTE: The `enable_if` is intended to disambiguate the two three-argument + // overloads of `insert()`. template ::iterator_category, - std::input_iterator_tag>::value>::type> + typename = EnableIfInputIterator> iterator insert(const_iterator position, InputIterator first, InputIterator last) { - using IterType = - typename std::iterator_traits::iterator_category; - return InsertWithRange(position, first, last, IterType()); + return InsertWithRange(position, first, last, + IteratorCategory()); } - // Overload of InlinedVector::insert() for inserting a list of elements at - // `position`, returning an iterator pointing to the first of the newly - // inserted elements. - iterator insert(const_iterator position, - std::initializer_list init) { - return insert(position, init.begin(), init.end()); + // `InlinedVector::emplace()` + // + // Constructs and inserts an object in the inlined vector at the given + // `position`, returning an `iterator` pointing to the newly emplaced element. + template + iterator emplace(const_iterator position, Args&&... args); + + // `InlinedVector::emplace_back()` + // + // Constructs and appends a new element to the end of the inlined vector, + // returning a `reference` to the emplaced element. + template + reference emplace_back(Args&&... args) { + size_type s = size(); + assert(s <= capacity()); + if (ABSL_PREDICT_FALSE(s == capacity())) { + return GrowAndEmplaceBack(std::forward(args)...); + } + assert(s < capacity()); + + pointer space; + if (allocated()) { + tag().set_allocated_size(s + 1); + space = allocated_space(); + } else { + tag().set_inline_size(s + 1); + space = inlined_space(); + } + return Construct(space + s, std::forward(args)...); + } + + // `InlinedVector::push_back()` + // + // Appends a copy of `v` to the end of the inlined vector. + void push_back(const_reference v) { static_cast(emplace_back(v)); } + + // Overload of `InlinedVector::push_back()` for moving `v` into a newly + // appended element. + void push_back(rvalue_reference v) { + static_cast(emplace_back(std::move(v))); + } + + // `InlinedVector::pop_back()` + // + // Destroys the element at the end of the inlined vector and shrinks the size + // by `1` (unless the inlined vector is empty, in which case this is a no-op). + void pop_back() noexcept { + assert(!empty()); + size_type s = size(); + if (allocated()) { + Destroy(allocated_space() + s - 1, allocated_space() + s); + tag().set_allocated_size(s - 1); + } else { + Destroy(inlined_space() + s - 1, inlined_space() + s); + tag().set_inline_size(s - 1); + } } - // InlinedVector::erase() + // `InlinedVector::erase()` // // Erases the element at `position` of the inlined vector, returning an - // iterator pointing to the following element or the container's end if the - // last element was erased. + // `iterator` pointing to the first element following the erased element. + // + // NOTE: May return the end iterator, which is not dereferencable. iterator erase(const_iterator position) { assert(position >= begin()); assert(position < end()); @@ -549,23 +594,36 @@ class InlinedVector { return pos; } - // Overload of InlinedVector::erase() for erasing all elements in the - // iterator range [first, last) in the inlined vector, returning an iterator - // pointing to the first element following the range erased, or the - // container's end if range included the container's last element. 
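// ---------------------------------------------------------------------------
// The GrowAndEmplaceBack() path above doubles the capacity when the vector is
// full, which is what gives push_back()/emplace_back() their amortized O(1)
// cost. A sketch of the observable effect (the exact capacity values are an
// implementation detail; this only reports the growth points):
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdio>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v;
  size_t last_capacity = v.capacity();  // starts at the inlined capacity, 4
  for (int i = 0; i < 64; ++i) {
    v.push_back(i);
    if (v.capacity() != last_capacity) {  // a reallocation happened
      std::printf("size=%zu capacity=%zu\n", v.size(), v.capacity());
      last_capacity = v.capacity();
    }
  }
  return 0;
}
// ---------------------------------------------------------------------------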
-  iterator erase(const_iterator first, const_iterator last);
+  // Overload of `InlinedVector::erase()` for erasing all elements in the
+  // range [`from`, `to`) in the inlined vector. Returns an `iterator` pointing
+  // to the first element following the range erased or the end iterator if
+  // `to` was the end iterator.
+  iterator erase(const_iterator from, const_iterator to);
+
+  // `InlinedVector::clear()`
+  //
+  // Destroys all elements in the inlined vector, sets the size to `0` and
+  // deallocates the heap allocation if the inlined vector was allocated.
+  void clear() noexcept {
+    size_type s = size();
+    if (allocated()) {
+      Destroy(allocated_space(), allocated_space() + s);
+      allocation().Dealloc(allocator());
+    } else if (s != 0) {  // do nothing for empty vectors
+      Destroy(inlined_space(), inlined_space() + s);
+    }
+    tag() = Tag();
+  }

-  // InlinedVector::reserve()
+  // `InlinedVector::reserve()`
   //
   // Enlarges the underlying representation of the inlined vector so it can hold
   // at least `n` elements. This method does not change `size()` or the actual
   // contents of the vector.
   //
-  // Note that if `n` does not exceed the inlined vector's initial size `N`,
-  // `reserve()` will have no effect; if it does exceed its initial size,
-  // `reserve()` will trigger an initial allocation and move the inlined vector
-  // onto the heap. If the vector already exists on the heap and the requested
-  // size exceeds it, a reallocation will be performed.
+  // NOTE: If `n` does not exceed `capacity()`, `reserve()` will have no
+  // effect. Otherwise, `reserve()` will reallocate and perform an element-wise
+  // move of everything contained.
   void reserve(size_type n) {
     if (n > capacity()) {
       // Make room for new elements
@@ -573,26 +631,25 @@ class InlinedVector {
     }
   }

-  // InlinedVector::shrink_to_fit()
+  // `InlinedVector::shrink_to_fit()`
   //
-  // Reduces memory usage by freeing unused memory.
-  // After this call `capacity()` will be equal to `max(N, size())`.
+  // Reduces memory usage by freeing unused memory. After this call,
+  // `capacity()` will be equal to `(std::max)(inlined_capacity(), size())`.
   //
-  // If `size() <= N` and the elements are currently stored on the heap, they
-  // will be moved to the inlined storage and the heap memory deallocated.
-  // If `size() > N` and `size() < capacity()` the elements will be moved to
-  // a reallocated storage on heap.
+  // If `size() <= inlined_capacity()` and the elements are currently stored on
+  // the heap, they will be moved to the inlined storage and the heap memory
+  // will be deallocated.
+  // If `size() > inlined_capacity()` and `size() < capacity()` the elements
+  // will be moved to a smaller heap allocation.
   void shrink_to_fit() {
     const auto s = size();
-    if (!allocated() || s == capacity()) {
-      // There's nothing to deallocate.
-      return;
-    }
+    if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;

-    if (s <= N) {
+    if (s <= inlined_capacity()) {
       // Move the elements to the inlined storage.
-      // We have to do this using a temporary, because inlined_storage and
-      // allocation_storage are in a union field.
+      // We have to do this using a temporary, because `inlined_storage` and
+      // `allocation_storage` are in a union field.
       auto temp = std::move(*this);
       assign(std::make_move_iterator(temp.begin()),
              std::make_move_iterator(temp.end()));
@@ -600,8 +657,8 @@ class InlinedVector {
     }

     // Reallocate storage and move elements.
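// ---------------------------------------------------------------------------
// A sketch of the capacity contract documented for reserve() and
// shrink_to_fit() above (illustrative only; it relies on the documented
// behavior, e.g. that a size that fits inlined_capacity() returns to inline
// storage after shrink_to_fit()).
// ---------------------------------------------------------------------------
#include <cassert>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v = {1, 2, 3};
  v.reserve(2);                // no-op: 2 <= capacity()
  assert(v.capacity() >= 4);
  v.reserve(100);              // reallocates; elements move to the heap
  assert(v.capacity() >= 100);
  v.shrink_to_fit();           // size() == 3 fits inline again
  assert(v.capacity() == 4);   // per the docs: max(inlined_capacity(), size())
  return 0;
}
// ---------------------------------------------------------------------------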
- // We can't simply use the same approach as above, because assign() would - // call into reserve() internally and reserve larger capacity than we need. + // We can't simply use the same approach as above, because `assign()` would + // call into `reserve()` internally and reserve larger capacity than we need Allocation new_allocation(allocator(), s); UninitializedCopy(std::make_move_iterator(allocated_space()), std::make_move_iterator(allocated_space() + s), @@ -609,118 +666,126 @@ class InlinedVector { ResetAllocation(new_allocation, s); } - // InlinedVector::swap() + // `InlinedVector::swap()` // // Swaps the contents of this inlined vector with the contents of `other`. void swap(InlinedVector& other); - // InlinedVector::get_allocator() - // - // Returns the allocator of this inlined vector. - allocator_type get_allocator() const { return allocator(); } + template + friend Hash AbslHashValue(Hash hash, const InlinedVector& inlined_vector) { + const_pointer p = inlined_vector.data(); + size_type n = inlined_vector.size(); + return Hash::combine(Hash::combine_contiguous(std::move(hash), p, n), n); + } private: - static_assert(N > 0, "inlined vector with nonpositive size"); - - // It holds whether the vector is allocated or not in the lowest bit. - // The size is held in the high bits: - // size_ = (size << 1) | is_allocated; + // Holds whether the vector is allocated or not in the lowest bit and the size + // in the high bits: + // `size_ = (size << 1) | is_allocated;` class Tag { public: Tag() : size_(0) {} - size_type size() const { return size_ >> 1; } - void add_size(size_type n) { size_ += n << 1; } - void set_inline_size(size_type n) { size_ = n << 1; } - void set_allocated_size(size_type n) { size_ = (n << 1) | 1; } - bool allocated() const { return size_ & 1; } + size_type size() const { return size_ / 2; } + void add_size(size_type n) { size_ += n * 2; } + void set_inline_size(size_type n) { size_ = n * 2; } + void set_allocated_size(size_type n) { size_ = (n * 2) + 1; } + bool allocated() const { return size_ % 2; } private: size_type size_; }; - // Derives from allocator_type to use the empty base class optimization. - // If the allocator_type is stateless, we can 'store' - // our instance of it for free. + // Derives from `allocator_type` to use the empty base class optimization. + // If the `allocator_type` is stateless, we can store our instance for free. 
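// ---------------------------------------------------------------------------
// Two small illustrations of the machinery above (standalone; the names here
// are stand-ins, not part of this patch). First the Tag encoding
// `size_ = (size << 1) | is_allocated`, then the empty-base trick that the
// AllocatorAndTag class below relies on.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstddef>

int main() {
  // A size of 5 in allocated (heap) mode encodes as (5 << 1) | 1 == 11.
  size_t size_ = (5u << 1) | 1u;
  assert(size_ / 2 == 5);  // size()
  assert(size_ % 2 == 1);  // allocated()
  size_ = 5u << 1;         // same size, inlined mode
  assert(size_ / 2 == 5 && size_ % 2 == 0);

  // Deriving from an empty class adds no storage on common ABIs, so a
  // stateless allocator rides along "for free" next to the tag.
  struct EmptyAlloc {};                         // stand-in stateless allocator
  struct WithEbo : EmptyAlloc { size_t tag; };  // shaped like AllocatorAndTag
  assert(sizeof(WithEbo) == sizeof(size_t));    // EBO applied (common ABIs)
  return 0;
}
// ---------------------------------------------------------------------------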
class AllocatorAndTag : private allocator_type { public: - explicit AllocatorAndTag(const allocator_type& a, Tag t = Tag()) - : allocator_type(a), tag_(t) { - } + explicit AllocatorAndTag(const allocator_type& a) : allocator_type(a) {} + Tag& tag() { return tag_; } const Tag& tag() const { return tag_; } + allocator_type& allocator() { return *this; } const allocator_type& allocator() const { return *this; } + private: Tag tag_; }; class Allocation { public: - Allocation(allocator_type& a, // NOLINT(runtime/references) - size_type capacity) - : capacity_(capacity), - buffer_(AllocatorTraits::allocate(a, capacity_)) {} + Allocation(allocator_type& a, size_type capacity) + : capacity_(capacity), buffer_(Create(a, capacity)) {} - void Dealloc(allocator_type& a) { // NOLINT(runtime/references) - AllocatorTraits::deallocate(a, buffer(), capacity()); + void Dealloc(allocator_type& a) { + std::allocator_traits::deallocate(a, buffer_, capacity_); } size_type capacity() const { return capacity_; } - const value_type* buffer() const { return buffer_; } - value_type* buffer() { return buffer_; } + + const_pointer buffer() const { return buffer_; } + + pointer buffer() { return buffer_; } private: + static pointer Create(allocator_type& a, size_type n) { + return std::allocator_traits::allocate(a, n); + } + size_type capacity_; - value_type* buffer_; + pointer buffer_; }; const Tag& tag() const { return allocator_and_tag_.tag(); } + Tag& tag() { return allocator_and_tag_.tag(); } Allocation& allocation() { return reinterpret_cast(rep_.allocation_storage.allocation); } + const Allocation& allocation() const { return reinterpret_cast( rep_.allocation_storage.allocation); } + void init_allocation(const Allocation& allocation) { new (&rep_.allocation_storage.allocation) Allocation(allocation); } - value_type* inlined_space() { - return reinterpret_cast(&rep_.inlined_storage.inlined); - } - const value_type* inlined_space() const { - return reinterpret_cast(&rep_.inlined_storage.inlined); + // TODO(absl-team): investigate whether the reinterpret_cast is appropriate. + pointer inlined_space() { + return reinterpret_cast( + std::addressof(rep_.inlined_storage.inlined[0])); } - value_type* allocated_space() { - return allocation().buffer(); - } - const value_type* allocated_space() const { - return allocation().buffer(); + const_pointer inlined_space() const { + return reinterpret_cast( + std::addressof(rep_.inlined_storage.inlined[0])); } + pointer allocated_space() { return allocation().buffer(); } + + const_pointer allocated_space() const { return allocation().buffer(); } + const allocator_type& allocator() const { return allocator_and_tag_.allocator(); } - allocator_type& allocator() { - return allocator_and_tag_.allocator(); - } + + allocator_type& allocator() { return allocator_and_tag_.allocator(); } bool allocated() const { return tag().allocated(); } - // Enlarge the underlying representation so we can store size_ + delta elems. - // The size is not changed, and any newly added memory is not initialized. + // Enlarge the underlying representation so we can store `size_ + delta` elems + // in allocated space. The size is not changed, and any newly added memory is + // not initialized. void EnlargeBy(size_type delta); - // Shift all elements from position to end() n places to the right. + // Shift all elements from `position` to `end()` by `n` places to the right. // If the vector needs to be enlarged, memory will be allocated. 
- // Returns iterators pointing to the start of the previously-initialized + // Returns `iterator`s pointing to the start of the previously-initialized // portion and the start of the uninitialized portion of the created gap. - // The number of initialized spots is pair.second - pair.first; - // the number of raw spots is n - (pair.second - pair.first). + // The number of initialized spots is `pair.second - pair.first`. The number + // of raw spots is `n - (pair.second - pair.first)`. // // Updates the size of the InlinedVector internally. std::pair ShiftRight(const_iterator position, @@ -740,13 +805,13 @@ class InlinedVector { } template - value_type& GrowAndEmplaceBack(Args&&... args) { + reference GrowAndEmplaceBack(Args&&... args) { assert(size() == capacity()); const size_type s = size(); Allocation new_allocation(allocator(), 2 * capacity()); - value_type& new_element = + reference new_element = Construct(new_allocation.buffer() + s, std::forward(args)...); UninitializedCopy(std::make_move_iterator(data()), std::make_move_iterator(data() + s), @@ -758,98 +823,91 @@ class InlinedVector { } void InitAssign(size_type n); - void InitAssign(size_type n, const value_type& t); + + void InitAssign(size_type n, const_reference v); template - value_type& Construct(pointer p, Args&&... args) { - AllocatorTraits::construct(allocator(), p, std::forward(args)...); + reference Construct(pointer p, Args&&... args) { + std::allocator_traits::construct( + allocator(), p, std::forward(args)...); return *p; } - template - void UninitializedCopy(Iter src, Iter src_last, value_type* dst) { + template + void UninitializedCopy(Iterator src, Iterator src_last, pointer dst) { for (; src != src_last; ++dst, ++src) Construct(dst, *src); } template - void UninitializedFill(value_type* dst, value_type* dst_last, - const Args&... args) { + void UninitializedFill(pointer dst, pointer dst_last, const Args&... args) { for (; dst != dst_last; ++dst) Construct(dst, args...); } - // Destroy [ptr, ptr_last) in place. - void Destroy(value_type* ptr, value_type* ptr_last); + // Destroy [`from`, `to`) in place. + void Destroy(pointer from, pointer to); - template - void AppendRange(Iter first, Iter last, std::input_iterator_tag) { - std::copy(first, last, std::back_inserter(*this)); - } + template + void AppendRange(Iterator first, Iterator last, std::forward_iterator_tag); - // Faster path for forward iterators. - template - void AppendRange(Iter first, Iter last, std::forward_iterator_tag); + template + void AppendRange(Iterator first, Iterator last, std::input_iterator_tag); - template - void AppendRange(Iter first, Iter last) { - using IterTag = typename std::iterator_traits::iterator_category; - AppendRange(first, last, IterTag()); - } + template + void AssignRange(Iterator first, Iterator last, std::forward_iterator_tag); - template - void AssignRange(Iter first, Iter last, std::input_iterator_tag); - - // Faster path for forward iterators. 
- template - void AssignRange(Iter first, Iter last, std::forward_iterator_tag); - - template - void AssignRange(Iter first, Iter last) { - using IterTag = typename std::iterator_traits::iterator_category; - AssignRange(first, last, IterTag()); - } + template + void AssignRange(Iterator first, Iterator last, std::input_iterator_tag); iterator InsertWithCount(const_iterator position, size_type n, - const value_type& v); + const_reference v); - template - iterator InsertWithRange(const_iterator position, InputIter first, - InputIter last, std::input_iterator_tag); - template - iterator InsertWithRange(const_iterator position, ForwardIter first, - ForwardIter last, std::forward_iterator_tag); + template + iterator InsertWithRange(const_iterator position, ForwardIterator first, + ForwardIterator last, std::forward_iterator_tag); - AllocatorAndTag allocator_and_tag_; + template + iterator InsertWithRange(const_iterator position, InputIterator first, + InputIterator last, std::input_iterator_tag); - // Either the inlined or allocated representation + // Stores either the inlined or allocated representation union Rep { - // Use struct to perform indirection that solves a bizarre compilation - // error on Visual Studio (all known versions). - struct { - typename std::aligned_storage::type inlined[N]; - } inlined_storage; - struct { - typename std::aligned_storage::type allocation; - } allocation_storage; - } rep_; + using ValueTypeBuffer = + absl::aligned_storage_t; + using AllocationBuffer = + absl::aligned_storage_t; + + // Structs wrap the buffers to perform indirection that solves a bizarre + // compilation error on Visual Studio (all known versions). + struct InlinedRep { + ValueTypeBuffer inlined[N]; + }; + struct AllocatedRep { + AllocationBuffer allocation; + }; + + InlinedRep inlined_storage; + AllocatedRep allocation_storage; + }; + + AllocatorAndTag allocator_and_tag_; + Rep rep_; }; // ----------------------------------------------------------------------------- // InlinedVector Non-Member Functions // ----------------------------------------------------------------------------- -// swap() +// `swap()` // // Swaps the contents of two inlined vectors. This convenience function -// simply calls InlinedVector::swap(other_inlined_vector). +// simply calls `InlinedVector::swap()`. template void swap(InlinedVector& a, InlinedVector& b) noexcept(noexcept(a.swap(b))) { a.swap(b); } -// operator==() +// `operator==()` // // Tests the equivalency of the contents of two inlined vectors. template @@ -858,7 +916,7 @@ bool operator==(const InlinedVector& a, return absl::equal(a.begin(), a.end(), b.begin(), b.end()); } -// operator!=() +// `operator!=()` // // Tests the inequality of the contents of two inlined vectors. template @@ -867,7 +925,7 @@ bool operator!=(const InlinedVector& a, return !(a == b); } -// operator<() +// `operator<()` // // Tests whether the contents of one inlined vector are less than the contents // of another through a lexicographical comparison operation. @@ -877,7 +935,7 @@ bool operator<(const InlinedVector& a, return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); } -// operator>() +// `operator>()` // // Tests whether the contents of one inlined vector are greater than the // contents of another through a lexicographical comparison operation. 
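// ---------------------------------------------------------------------------
// The relational operators in this section compare element-wise and
// lexicographically, as the comments state. A compact sketch:
// ---------------------------------------------------------------------------
#include <cassert>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> a = {1, 2, 3};
  absl::InlinedVector<int, 4> b = {1, 2, 4};
  absl::InlinedVector<int, 4> c = {1, 2};
  assert(a == a && a != b);
  assert(a < b);   // first differing element decides: 3 < 4
  assert(c < a);   // a proper prefix compares less
  assert(b > a && c <= a && b >= a);
  return 0;
}
// ---------------------------------------------------------------------------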
@@ -887,7 +945,7 @@ bool operator>(const InlinedVector& a, return b < a; } -// operator<=() +// `operator<=()` // // Tests whether the contents of one inlined vector are less than or equal to // the contents of another through a lexicographical comparison operation. @@ -897,7 +955,7 @@ bool operator<=(const InlinedVector& a, return !(b < a); } -// operator>=() +// `operator>=()` // // Tests whether the contents of one inlined vector are greater than or equal to // the contents of another through a lexicographical comparison operation. @@ -909,97 +967,99 @@ bool operator>=(const InlinedVector& a, // ----------------------------------------------------------------------------- // Implementation of InlinedVector -// ----------------------------------------------------------------------------- // -// Do not depend on any implementation details below this line. +// Do not depend on any below implementation details! +// ----------------------------------------------------------------------------- template -InlinedVector::InlinedVector(const InlinedVector& v) - : allocator_and_tag_(v.allocator()) { - reserve(v.size()); +InlinedVector::InlinedVector(const InlinedVector& other) + : allocator_and_tag_(other.allocator()) { + reserve(other.size()); if (allocated()) { - UninitializedCopy(v.begin(), v.end(), allocated_space()); - tag().set_allocated_size(v.size()); + UninitializedCopy(other.begin(), other.end(), allocated_space()); + tag().set_allocated_size(other.size()); } else { - UninitializedCopy(v.begin(), v.end(), inlined_space()); - tag().set_inline_size(v.size()); + UninitializedCopy(other.begin(), other.end(), inlined_space()); + tag().set_inline_size(other.size()); } } template -InlinedVector::InlinedVector(const InlinedVector& v, +InlinedVector::InlinedVector(const InlinedVector& other, const allocator_type& alloc) : allocator_and_tag_(alloc) { - reserve(v.size()); + reserve(other.size()); if (allocated()) { - UninitializedCopy(v.begin(), v.end(), allocated_space()); - tag().set_allocated_size(v.size()); + UninitializedCopy(other.begin(), other.end(), allocated_space()); + tag().set_allocated_size(other.size()); } else { - UninitializedCopy(v.begin(), v.end(), inlined_space()); - tag().set_inline_size(v.size()); + UninitializedCopy(other.begin(), other.end(), inlined_space()); + tag().set_inline_size(other.size()); } } template -InlinedVector::InlinedVector(InlinedVector&& v) noexcept( +InlinedVector::InlinedVector(InlinedVector&& other) noexcept( absl::allocator_is_nothrow::value || std::is_nothrow_move_constructible::value) - : allocator_and_tag_(v.allocator_and_tag_) { - if (v.allocated()) { + : allocator_and_tag_(other.allocator_and_tag_) { + if (other.allocated()) { // We can just steal the underlying buffer from the source. // That leaves the source empty, so we clear its size. 
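// ---------------------------------------------------------------------------
// The allocated branch of the move constructor above transfers ownership of
// the heap buffer rather than moving elements one by one. A sketch of the
// observable consequence; the data() identity check reflects this particular
// implementation (per the comment above), not a general container guarantee.
// ---------------------------------------------------------------------------
#include <cassert>
#include <utility>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> a(100, 0);  // 100 > 4, so heap-allocated
  const int* p = a.data();
  absl::InlinedVector<int, 4> b = std::move(a);
  assert(b.data() == p);  // the buffer was stolen, not copied
  assert(a.empty());      // the source's tag was cleared
  return 0;
}
// ---------------------------------------------------------------------------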
- init_allocation(v.allocation()); - v.tag() = Tag(); + init_allocation(other.allocation()); + other.tag() = Tag(); } else { - UninitializedCopy(std::make_move_iterator(v.inlined_space()), - std::make_move_iterator(v.inlined_space() + v.size()), - inlined_space()); + UninitializedCopy( + std::make_move_iterator(other.inlined_space()), + std::make_move_iterator(other.inlined_space() + other.size()), + inlined_space()); } } template -InlinedVector::InlinedVector( - InlinedVector&& v, - const allocator_type& - alloc) noexcept(absl::allocator_is_nothrow::value) +InlinedVector::InlinedVector(InlinedVector&& other, + const allocator_type& alloc) noexcept( // + absl::allocator_is_nothrow::value) : allocator_and_tag_(alloc) { - if (v.allocated()) { - if (alloc == v.allocator()) { + if (other.allocated()) { + if (alloc == other.allocator()) { // We can just steal the allocation from the source. - tag() = v.tag(); - init_allocation(v.allocation()); - v.tag() = Tag(); + tag() = other.tag(); + init_allocation(other.allocation()); + other.tag() = Tag(); } else { // We need to use our own allocator - reserve(v.size()); - UninitializedCopy(std::make_move_iterator(v.begin()), - std::make_move_iterator(v.end()), allocated_space()); - tag().set_allocated_size(v.size()); + reserve(other.size()); + UninitializedCopy(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end()), + allocated_space()); + tag().set_allocated_size(other.size()); } } else { - UninitializedCopy(std::make_move_iterator(v.inlined_space()), - std::make_move_iterator(v.inlined_space() + v.size()), - inlined_space()); - tag().set_inline_size(v.size()); + UninitializedCopy( + std::make_move_iterator(other.inlined_space()), + std::make_move_iterator(other.inlined_space() + other.size()), + inlined_space()); + tag().set_inline_size(other.size()); } } template -void InlinedVector::InitAssign(size_type n, const value_type& t) { - if (n > static_cast(N)) { +void InlinedVector::InitAssign(size_type n, const_reference v) { + if (n > inlined_capacity()) { Allocation new_allocation(allocator(), n); init_allocation(new_allocation); - UninitializedFill(allocated_space(), allocated_space() + n, t); + UninitializedFill(allocated_space(), allocated_space() + n, v); tag().set_allocated_size(n); } else { - UninitializedFill(inlined_space(), inlined_space() + n, t); + UninitializedFill(inlined_space(), inlined_space() + n, v); tag().set_inline_size(n); } } template void InlinedVector::InitAssign(size_type n) { - if (n > static_cast(N)) { + if (n > inlined_capacity()) { Allocation new_allocation(allocator(), n); init_allocation(new_allocation); UninitializedFill(allocated_space(), allocated_space() + n); @@ -1031,7 +1091,7 @@ void InlinedVector::resize(size_type n) { } template -void InlinedVector::resize(size_type n, const value_type& elem) { +void InlinedVector::resize(size_type n, const_reference v) { size_type s = size(); if (n < s) { erase(begin() + n, end()); @@ -1040,23 +1100,23 @@ void InlinedVector::resize(size_type n, const value_type& elem) { reserve(n); assert(capacity() >= n); - // Fill new space with copies of 'elem'. + // Fill new space with copies of 'v'. 
if (allocated()) { - UninitializedFill(allocated_space() + s, allocated_space() + n, elem); + UninitializedFill(allocated_space() + s, allocated_space() + n, v); tag().set_allocated_size(n); } else { - UninitializedFill(inlined_space() + s, inlined_space() + n, elem); + UninitializedFill(inlined_space() + s, inlined_space() + n, v); tag().set_inline_size(n); } } template template -typename InlinedVector::iterator InlinedVector::emplace( - const_iterator position, Args&&... args) { +auto InlinedVector::emplace(const_iterator position, Args&&... args) + -> iterator { assert(position >= begin()); assert(position <= end()); - if (position == end()) { + if (ABSL_PREDICT_FALSE(position == end())) { emplace_back(std::forward(args)...); return end() - 1; } @@ -1076,14 +1136,14 @@ typename InlinedVector::iterator InlinedVector::emplace( } template -typename InlinedVector::iterator InlinedVector::erase( - const_iterator first, const_iterator last) { - assert(begin() <= first); - assert(first <= last); - assert(last <= end()); +auto InlinedVector::erase(const_iterator from, const_iterator to) + -> iterator { + assert(begin() <= from); + assert(from <= to); + assert(to <= end()); - iterator range_start = const_cast(first); - iterator range_end = const_cast(last); + iterator range_start = const_cast(from); + iterator range_end = const_cast(to); size_type s = size(); ptrdiff_t erase_gap = std::distance(range_start, range_end); @@ -1104,10 +1164,9 @@ typename InlinedVector::iterator InlinedVector::erase( template void InlinedVector::swap(InlinedVector& other) { - using std::swap; // Augment ADL with std::swap. - if (&other == this) { - return; - } + using std::swap; // Augment ADL with `std::swap`. + if (ABSL_PREDICT_FALSE(this == &other)) return; + if (allocated() && other.allocated()) { // Both out of line, so just swap the tag, allocation, and allocator. swap(tag(), other.tag()); @@ -1126,12 +1185,12 @@ void InlinedVector::swap(InlinedVector& other) { const size_type a_size = a->size(); const size_type b_size = b->size(); assert(a_size >= b_size); - // 'a' is larger. Swap the elements up to the smaller array size. - std::swap_ranges(a->inlined_space(), - a->inlined_space() + b_size, + // `a` is larger. Swap the elements up to the smaller array size. + std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size, b->inlined_space()); - // Move the remaining elements: A[b_size,a_size) -> B[b_size,a_size) + // Move the remaining elements: + // [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b` b->UninitializedCopy(a->inlined_space() + b_size, a->inlined_space() + a_size, b->inlined_space() + b_size); @@ -1143,6 +1202,7 @@ void InlinedVector::swap(InlinedVector& other) { assert(a->size() == b_size); return; } + // One is out of line, one is inline. // We first move the elements from the inlined vector into the // inlined space in the other vector. We then put the other vector's @@ -1157,13 +1217,13 @@ void InlinedVector::swap(InlinedVector& other) { assert(b->allocated()); const size_type a_size = a->size(); const size_type b_size = b->size(); - // In an optimized build, b_size would be unused. - (void)b_size; + // In an optimized build, `b_size` would be unused. + static_cast(b_size); - // Made Local copies of size(), don't need tag() accurate anymore + // Made Local copies of `size()`, don't need `tag()` accurate anymore swap(a->tag(), b->tag()); - // Copy b_allocation out before b's union gets clobbered by inline_space. 
+ // Copy `b_allocation` out before `b`'s union gets clobbered by `inline_space` Allocation b_allocation = b->allocation(); b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size, @@ -1185,7 +1245,7 @@ void InlinedVector::EnlargeBy(size_type delta) { const size_type s = size(); assert(s <= capacity()); - size_type target = std::max(static_cast(N), s + delta); + size_type target = std::max(inlined_capacity(), s + delta); // Compute new capacity by repeatedly doubling current capacity // TODO(psrc): Check and avoid overflow? @@ -1217,7 +1277,7 @@ auto InlinedVector::ShiftRight(const_iterator position, size_type n) while (new_capacity < required_size) { new_capacity <<= 1; } - // Move everyone into the new allocation, leaving a gap of n for the + // Move everyone into the new allocation, leaving a gap of `n` for the // requested shift. Allocation new_allocation(allocator(), new_capacity); size_type index = position - begin(); @@ -1235,8 +1295,8 @@ auto InlinedVector::ShiftRight(const_iterator position, size_type n) start_used = start_raw; } else { // If we had enough space, it's a two-part move. Elements going into - // previously-unoccupied space need an UninitializedCopy. Elements - // going into a previously-occupied space are just a move. + // previously-unoccupied space need an `UninitializedCopy()`. Elements + // going into a previously-occupied space are just a `std::move()`. iterator pos = const_cast(position); iterator raw_space = end(); size_type slots_in_used_space = raw_space - pos; @@ -1262,28 +1322,26 @@ auto InlinedVector::ShiftRight(const_iterator position, size_type n) } template -void InlinedVector::Destroy(value_type* ptr, value_type* ptr_last) { - for (value_type* p = ptr; p != ptr_last; ++p) { - AllocatorTraits::destroy(allocator(), p); +void InlinedVector::Destroy(pointer from, pointer to) { + for (pointer cur = from; cur != to; ++cur) { + std::allocator_traits::destroy(allocator(), cur); } - - // Overwrite unused memory with 0xab so we can catch uninitialized usage. - // Cast to void* to tell the compiler that we don't care that we might be - // scribbling on a vtable pointer. #ifndef NDEBUG - if (ptr != ptr_last) { - memset(reinterpret_cast(ptr), 0xab, - sizeof(*ptr) * (ptr_last - ptr)); + // Overwrite unused memory with `0xab` so we can catch uninitialized usage. + // Cast to `void*` to tell the compiler that we don't care that we might be + // scribbling on a vtable pointer. + if (from != to) { + auto len = sizeof(value_type) * std::distance(from, to); + std::memset(reinterpret_cast(from), 0xab, len); } #endif } template -template -void InlinedVector::AppendRange(Iter first, Iter last, +template +void InlinedVector::AppendRange(Iterator first, Iterator last, std::forward_iterator_tag) { - using Length = typename std::iterator_traits::difference_type; - Length length = std::distance(first, last); + auto length = std::distance(first, last); reserve(size() + length); if (allocated()) { UninitializedCopy(first, last, allocated_space() + size()); @@ -1295,24 +1353,17 @@ void InlinedVector::AppendRange(Iter first, Iter last, } template -template -void InlinedVector::AssignRange(Iter first, Iter last, +template +void InlinedVector::AppendRange(Iterator first, Iterator last, std::input_iterator_tag) { - // Optimized to avoid reallocation. - // Prefer reassignment to copy construction for elements. 
- iterator out = begin(); - for ( ; first != last && out != end(); ++first, ++out) - *out = *first; - erase(out, end()); std::copy(first, last, std::back_inserter(*this)); } template -template -void InlinedVector::AssignRange(Iter first, Iter last, +template +void InlinedVector::AssignRange(Iterator first, Iterator last, std::forward_iterator_tag) { - using Length = typename std::iterator_traits::difference_type; - Length length = std::distance(first, last); + auto length = std::distance(first, last); // Prefer reassignment to copy construction for elements. if (static_cast(length) <= size()) { erase(std::copy(first, last, begin()), end()); @@ -1330,12 +1381,26 @@ void InlinedVector::AssignRange(Iter first, Iter last, } } +template +template +void InlinedVector::AssignRange(Iterator first, Iterator last, + std::input_iterator_tag) { + // Optimized to avoid reallocation. + // Prefer reassignment to copy construction for elements. + iterator out = begin(); + for (; first != last && out != end(); ++first, ++out) { + *out = *first; + } + erase(out, end()); + std::copy(first, last, std::back_inserter(*this)); +} + template auto InlinedVector::InsertWithCount(const_iterator position, - size_type n, const value_type& v) + size_type n, const_reference v) -> iterator { assert(position >= begin() && position <= end()); - if (n == 0) return const_cast(position); + if (ABSL_PREDICT_FALSE(n == 0)) return const_cast(position); value_type copy = v; std::pair it_pair = ShiftRight(position, n); @@ -1346,41 +1411,39 @@ auto InlinedVector::InsertWithCount(const_iterator position, } template -template +template auto InlinedVector::InsertWithRange(const_iterator position, - InputIter first, InputIter last, - std::input_iterator_tag) - -> iterator { - assert(position >= begin() && position <= end()); - size_type index = position - cbegin(); - size_type i = index; - while (first != last) insert(begin() + i++, *first++); - return begin() + index; -} - -// Overload of InlinedVector::InsertWithRange() -template -template -auto InlinedVector::InsertWithRange(const_iterator position, - ForwardIter first, - ForwardIter last, + ForwardIterator first, + ForwardIterator last, std::forward_iterator_tag) -> iterator { assert(position >= begin() && position <= end()); - if (first == last) { - return const_cast(position); - } - using Length = typename std::iterator_traits::difference_type; - Length n = std::distance(first, last); + if (ABSL_PREDICT_FALSE(first == last)) return const_cast(position); + + auto n = std::distance(first, last); std::pair it_pair = ShiftRight(position, n); size_type used_spots = it_pair.second - it_pair.first; - ForwardIter open_spot = std::next(first, used_spots); + ForwardIterator open_spot = std::next(first, used_spots); std::copy(first, open_spot, it_pair.first); UninitializedCopy(open_spot, last, it_pair.second); return it_pair.first; } -} // inline namespace lts_2018_06_20 +template +template +auto InlinedVector::InsertWithRange(const_iterator position, + InputIterator first, + InputIterator last, + std::input_iterator_tag) + -> iterator { + assert(position >= begin() && position <= end()); + size_type index = position - cbegin(); + size_type i = index; + while (first != last) insert(begin() + i++, *first++); + return begin() + index; +} + +} // inline namespace lts_2018_12_18 } // namespace absl #endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc index 24f21749..a3ad0f8a 100644 --- 
a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -66,7 +66,7 @@ BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 // The purpose of the next two benchmarks is to verify that
 // absl::InlinedVector is efficient when moving is more efficient than
 // copying. To do so, we use strings that are larger than the short
-// std::string optimization.
+// string optimization.
 bool StringRepresentedInline(std::string s) {
   const char* chars = s.data();
   std::string s1 = std::move(s);
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 26a7d5bc..3a1ea8ac 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -31,6 +31,7 @@
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/test_instance_tracker.h"
+#include "absl/hash/hash_testing.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
@@ -905,6 +906,8 @@ TYPED_TEST_P(InstanceTest, Swap) {
     InstanceTracker tracker;
     InstanceVec a, b;
     const size_t inlined_capacity = a.capacity();
+    auto min_len = std::min(l1, l2);
+    auto max_len = std::max(l1, l2);
     for (int i = 0; i < l1; i++) a.push_back(Instance(i));
     for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
     EXPECT_EQ(tracker.instances(), l1 + l2);
@@ -918,15 +921,15 @@
       EXPECT_EQ(tracker.swaps(), 0);  // Allocations are swapped.
       EXPECT_EQ(tracker.moves(), 0);
     } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
-      EXPECT_EQ(tracker.swaps(), std::min(l1, l2));
-      // TODO(bsamwel): This should use moves when the type is movable.
-      EXPECT_EQ(tracker.copies(), std::max(l1, l2) - std::min(l1, l2));
+      EXPECT_EQ(tracker.swaps(), min_len);
+      EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
+                max_len - min_len);
     } else {
       // One is allocated and the other isn't. The allocation is transferred
       // without copying elements, and the inlined instances are copied/moved.
       EXPECT_EQ(tracker.swaps(), 0);
-      // TODO(bsamwel): This should use moves when the type is movable.
-      EXPECT_EQ(tracker.copies(), std::min(l1, l2));
+      EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
+                min_len);
     }

     EXPECT_EQ(l1, b.size());
@@ -1725,42 +1728,87 @@ TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
       std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
   using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;

+  // MSVC 2017's std::vector allocates different amounts of memory in debug
+  // versus opt mode.
+  int64_t test_allocated = 0;
+  StdVector v(CountingAllocator<int>{&test_allocated});
+  // The amount of memory allocated by a default constructed vector.
+  auto default_std_vec_allocated = test_allocated;
+  v.push_back(1);
+  // The amount of memory allocated by a copy-constructed vector with one
+  // element.
+  int64_t one_element_std_vec_copy_allocated = test_allocated;
+
   int64_t allocated = 0;
   AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
   EXPECT_EQ(allocated, 0);

   // This default constructs a vector, but the allocator should pass itself
-  // into the vector.
+  // into the vector, so check allocation compared to that.
   // The absl::InlinedVector does not allocate any memory.
-  // The vector does not allocate any memory.
+  // The vector may allocate memory.
+  auto expected = default_std_vec_allocated;
   vec.resize(1);
-  EXPECT_EQ(allocated, 0);
+  EXPECT_EQ(allocated, expected);

   // We make vector allocate memory.
// It must go through the allocator even though we didn't construct the - // vector directly. + // vector directly. This assumes that vec[0] doesn't need to grow its + // allocation. + expected += sizeof(int); vec[0].push_back(1); - EXPECT_EQ(allocated, sizeof(int) * 1); + EXPECT_EQ(allocated, expected); // Another allocating vector. + expected += one_element_std_vec_copy_allocated; vec.push_back(vec[0]); - EXPECT_EQ(allocated, sizeof(int) * 2); + EXPECT_EQ(allocated, expected); // Overflow the inlined memory. // The absl::InlinedVector will now allocate. + expected += sizeof(StdVector) * 8 + default_std_vec_allocated * 3; vec.resize(5); - EXPECT_EQ(allocated, sizeof(int) * 2 + sizeof(StdVector) * 8); + EXPECT_EQ(allocated, expected); // Adding one more in external mode should also work. + expected += one_element_std_vec_copy_allocated; vec.push_back(vec[0]); - EXPECT_EQ(allocated, sizeof(int) * 3 + sizeof(StdVector) * 8); + EXPECT_EQ(allocated, expected); - // And extending these should still work. + // And extending these should still work. This assumes that vec[0] does not + // need to grow its allocation. + expected += sizeof(int); vec[0].push_back(1); - EXPECT_EQ(allocated, sizeof(int) * 4 + sizeof(StdVector) * 8); + EXPECT_EQ(allocated, expected); vec.clear(); EXPECT_EQ(allocated, 0); } +TEST(AllocatorSupportTest, SizeAllocConstructor) { + constexpr int inlined_size = 4; + using Alloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + + { + auto len = inlined_size / 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Inline storage used; allocator should not be invoked + EXPECT_THAT(allocated, 0); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } + + { + auto len = inlined_size * 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Out of line storage used; allocation of 8 elements expected + EXPECT_THAT(allocated, len * sizeof(int)); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } +} + } // anonymous namespace diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h new file mode 100644 index 00000000..29fe7c12 --- /dev/null +++ b/absl/container/internal/compressed_tuple.h @@ -0,0 +1,177 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... 
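// ---------------------------------------------------------------------------
// The example in the comment above lost its template arguments somewhere in
// transit. Spelled out in full with illustrative placeholder types, the
// intended usage reads roughly as follows; Empty contributes no storage.
// ---------------------------------------------------------------------------
#include <string>
#include "absl/container/internal/compressed_tuple.h"

struct Empty {};  // an empty class: stored via the empty base optimization

int main() {
  absl::container_internal::CompressedTuple<int, Empty, std::string> value(
      7, Empty{}, std::string("ABC"));
  // get<I>() accesses the I-th element, like std::get on a std::tuple.
  bool ok = value.get<0>() == 7 && value.get<2>() == "ABC";
  return ok ? 0 : 1;
}
// ---------------------------------------------------------------------------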
+// +// http://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include + +#include "absl/utility/utility.h" + +#ifdef _MSC_VER +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else // _MSC_VER +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif // _MSC_VER + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +class CompressedTuple; + +namespace internal_compressed_tuple { + +template +struct Elem; +template +struct Elem, I> + : std::tuple_element> {}; +template +using ElemT = typename Elem::type; + +// Use the __is_final intrinsic if available. Where it's not available, classes +// declared with the 'final' specifier cannot be used as CompressedTuple +// elements. +// TODO(sbenza): Replace this with std::is_final in C++14. +template +constexpr bool IsFinal() { +#if defined(__clang__) || defined(__GNUC__) + return __is_final(T); +#else + return false; +#endif +} + +template +constexpr bool ShouldUseBase() { + return std::is_class::value && std::is_empty::value && !IsFinal(); +} + +// The storage class provides two specializations: +// - For empty classes, it stores T as a base class. +// - For everything else, it stores T as a member. +template >()> +struct Storage { + using T = ElemT; + T value; + constexpr Storage() = default; + explicit constexpr Storage(T&& v) : value(absl::forward(v)) {} + constexpr const T& get() const { return value; } + T& get() { return value; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage + : ElemT { + using T = internal_compressed_tuple::ElemT; + constexpr Storage() = default; + explicit constexpr Storage(T&& v) : T(absl::forward(v)) {} + constexpr const T& get() const { return *this; } + T& get() { return *this; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + CompressedTupleImpl, absl::index_sequence> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : Storage, + std::integral_constant::value>... { + constexpr CompressedTupleImpl() = default; + explicit constexpr CompressedTupleImpl(Ts&&... args) + : Storage, I>(absl::forward(args))... {} +}; + +} // namespace internal_compressed_tuple + +// Helper class to perform the Empty Base Class Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the CompressedTuple. If all types in Ts are +// empty classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member .get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... 
+// +// http://en.cppreference.com/w/cpp/language/ebo +template +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple + : private internal_compressed_tuple::CompressedTupleImpl< + CompressedTuple, absl::index_sequence_for> { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + public: + constexpr CompressedTuple() = default; + explicit constexpr CompressedTuple(Ts... base) + : CompressedTuple::CompressedTupleImpl(absl::forward(base)...) {} + + template + ElemT& get() { + return internal_compressed_tuple::Storage::get(); + } + + template + constexpr const ElemT& get() const { + return internal_compressed_tuple::Storage::get(); + } +}; + +// Explicit specialization for a zero-element tuple +// (needed to avoid ambiguous overloads for the default constructor). +template <> +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc new file mode 100644 index 00000000..2b5ed4a4 --- /dev/null +++ b/absl/container/internal/compressed_tuple_test.cc @@ -0,0 +1,168 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "absl/container/internal/compressed_tuple.h"
+
+#include
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+template <int>
+struct Empty {};
+
+template <typename T>
+struct NotEmpty {
+  T value;
+};
+
+template <typename T, typename U>
+struct TwoValues {
+  T value1;
+  U value2;
+};
+
+TEST(CompressedTupleTest, Sizeof) {
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
+  EXPECT_EQ(sizeof(int),
+            sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
+
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, NotEmpty<double>>));
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
+}
+
+TEST(CompressedTupleTest, Access) {
+  struct S {
+    std::string x;
+  };
+  CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
+  EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_EQ("ABC", x.get<2>().x);
+}
+
+TEST(CompressedTupleTest, NonClasses) {
+  CompressedTuple<int, const char*> x(7, "ABC");
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_STREQ("ABC", x.get<1>());
+}
+
+TEST(CompressedTupleTest, MixClassAndNonClass) {
+  CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
+                                                                  {1.25});
+  struct Mock {
+    int v;
+    const char* p;
+    double d;
+  };
+  EXPECT_EQ(sizeof(x), sizeof(Mock));
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_STREQ("ABC", x.get<1>());
+  EXPECT_EQ(1.25, x.get<3>().value);
+}
+
+TEST(CompressedTupleTest, Nested) {
+  CompressedTuple<int, CompressedTuple<int>,
+                  CompressedTuple<int, CompressedTuple<int>>>
+      x(1, CompressedTuple<int>(2),
+        CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
+  EXPECT_EQ(1, x.get<0>());
+  EXPECT_EQ(2, x.get<1>().get<0>());
+  EXPECT_EQ(3, x.get<2>().get<0>());
+  EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
+
+  CompressedTuple<Empty<0>, Empty<0>,
+                  CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
+      y;
+  std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
+                              &y.get<2>().get<1>().get<0>()};
+#ifdef _MSC_VER
+  // MSVC has a bug where many instances of the same base class are laid out
+  // in the same address when using __declspec(empty_bases).
+  // This will be fixed in a future version of MSVC.
+  int expected = 1;
+#else
+  int expected = 4;
+#endif
+  EXPECT_EQ(expected, sizeof(y));
+  EXPECT_EQ(expected, empties.size());
+  EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
+
+  EXPECT_EQ(4 * sizeof(char),
+            sizeof(CompressedTuple<CompressedTuple<char, char>,
+                                   CompressedTuple<char, char>>));
+  EXPECT_TRUE(
+      (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
+                                     CompressedTuple<Empty<1>>>>::value));
+}
+
+TEST(CompressedTupleTest, Reference) {
+  int i = 7;
+  std::string s = "Very long std::string that goes in the heap";
+  CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
+
+  // Sanity check. We should not have moved from `s`.
+  EXPECT_EQ(s, "Very long std::string that goes in the heap");
+
+  EXPECT_EQ(x.get<0>(), x.get<1>());
+  EXPECT_NE(&x.get<0>(), &x.get<1>());
+  EXPECT_EQ(&x.get<1>(), &i);
+
+  EXPECT_EQ(x.get<2>(), x.get<3>());
+  EXPECT_NE(&x.get<2>(), &x.get<3>());
+  EXPECT_EQ(&x.get<3>(), &s);
+}
+
+TEST(CompressedTupleTest, NoElements) {
+  CompressedTuple<> x;
+  static_cast<void>(x);  // Silence -Wunused-variable.
+  EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
+}
+
+TEST(CompressedTupleTest, Constexpr) {
+  constexpr CompressedTuple<int, double, CompressedTuple<int>> x(
+      7, 1.25, CompressedTuple<int>(5));
+  constexpr int x0 = x.get<0>();
+  constexpr double x1 = x.get<1>();
+  constexpr int x2 = x.get<2>().get<0>();
+  EXPECT_EQ(x0, 7);
+  EXPECT_EQ(x1, 1.25);
+  EXPECT_EQ(x2, 5);
+}
+
+#if defined(__clang__) || defined(__GNUC__)
+TEST(CompressedTupleTest, EmptyFinalClass) {
+  struct S final {
+    int f() const { return 5; }
+  };
+  CompressedTuple<S> x;
+  EXPECT_EQ(x.get<0>().f(), 5);
+}
+#endif
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
new file mode 100644
index 00000000..ddccbe05
--- /dev/null
+++ b/absl/container/internal/container_memory.h
@@ -0,0 +1,407 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above certain
+// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
+// Allocate() doesn't apply alignment corrections. If the underlying allocator
+// returns an insufficiently aligned pointer, that's what you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  struct alignas(Alignment) M {};
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  A mem_alloc(*alloc);
+  void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+  assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+         "allocator does not respect alignment");
+  return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  struct alignas(Alignment) M {};
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  A mem_alloc(*alloc);
+  AT::deallocate(mem_alloc, static_cast<M*>(p),
+                 (n + sizeof(M) - 1) / sizeof(M));
+}
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
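// ---------------------------------------------------------------------------
// A sketch of the Allocate<Alignment>() / Deallocate<Alignment>() contract
// described above, using std::allocator<int> (illustrative only; the byte
// count and alignment are arbitrary).
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>
#include <memory>
#include "absl/container/internal/container_memory.h"

int main() {
  std::allocator<int> alloc;
  // Request 40 bytes at 16-byte alignment. The same Alignment and size must
  // be passed back to Deallocate.
  void* p = absl::container_internal::Allocate<16>(&alloc, 40);
  assert(reinterpret_cast<uintptr_t>(p) % 16 == 0);
  absl::container_internal::Deallocate<16>(&alloc, p, 40);
  return 0;
}
// ---------------------------------------------------------------------------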
+template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + absl::index_sequence) { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. +template +auto TupleRef(T&& t) -> decltype( + TupleRefImpl(std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +template +decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// Given arguments of an std::pair's consructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(p.first), std::move(p.second)); +inline std::pair, std::tuple<>> PairArgs() { return {}; } +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +template +auto DecomposePair(F&& f, Args&&... 
args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + +// Helper functions for asan and msan. +inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; +} + +inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; +} + +template +inline void SanitizerPoisonObject(const T* object) { + SanitizerPoisonMemoryRegion(object, sizeof(T)); +} + +template +inline void SanitizerUnpoisonObject(const T* object) { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); +} + +namespace memory_internal { + +// If Pair is a standard-layout type, OffsetOf::kFirst and +// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and +// offsetof(Pair, second) respectively. Otherwise they are -1. +// +// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout +// type, which is non-portable. +template +struct OffsetOf { + static constexpr size_t kFirst = -1; + static constexpr size_t kSecond = -1; +}; + +template +struct OffsetOf::type> { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); +}; + +template +struct IsLayoutCompatible { + private: + struct Pair { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() { + return std::is_standard_layout
<P>() && sizeof(P) == sizeof(Pair) &&
+           alignof(P) == alignof(Pair) &&
+           memory_internal::OffsetOf<P>::kFirst ==
+               memory_internal::OffsetOf<Pair>::kFirst &&
+           memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); +}; + +} // namespace memory_internal + +// If kMutableKeys is false, only the value member is accessed. +// +// If kMutableKeys is true, key is accessed through all slots while value and +// mutable_value are accessed only via INITIALIZED slots. Slots are created and +// destroyed via mutable_value so that the key can be moved later. +template +union slot_type { + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = + std::integral_constant::value>; + + public: + slot_type() {} + ~slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = std::pair; + + value_type value; + mutable_value_type mutable_value; + K key; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, + std::forward(args)...); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + emplace(new_slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + + template + static void swap(Allocator* alloc, slot_type* a, slot_type* b) { + if (kMutableKeys::value) { + using std::swap; + swap(a->mutable_value, b->mutable_value); + } else { + value_type tmp = std::move(a->value); + absl::allocator_traits::destroy(*alloc, &a->value); + absl::allocator_traits::construct(*alloc, &a->value, + std::move(b->value)); + absl::allocator_traits::destroy(*alloc, &b->value); + absl::allocator_traits::construct(*alloc, &b->value, + std::move(tmp)); + } + } + + template + static void move(Allocator* alloc, slot_type* src, slot_type* dest) { + if (kMutableKeys::value) { + dest->mutable_value = std::move(src->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &dest->value); + absl::allocator_traits::construct(*alloc, &dest->value, + 
std::move(src->value)); + } + } + + template + static void move(Allocator* alloc, slot_type* first, slot_type* last, + slot_type* result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc new file mode 100644 index 00000000..da87ca20 --- /dev/null +++ b/absl/container/internal/container_memory_test.cc @@ -0,0 +1,190 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/container_memory.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator; + + public: + Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits::destroy(*alloc(), ptr_); + std::allocator_traits::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed( + std::make_tuple(std::string("a")), + 
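+                   // The functor is invoked with the std::string constructed
+                   // from the tuple's arguments: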
[](absl::string_view str) { return str.size(); })); +} + +template +decltype(DecomposeValue(std::declval(), std::declval())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward(f), std::forward(arg)); +} + +template +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template +decltype(DecomposeValueImpl(0, std::declval(), std::declval())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template +decltype(DecomposePair(std::declval(), std::declval()...)) +DecomposePairImpl(int, F&& f, Args&&... args) { + return DecomposePair(std::forward(f), std::forward(args)...); +} + +template +const char* DecomposePairImpl(char, F&& f, Args&&... args) { + return "not decomposable"; +} + +template +decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward(f), std::forward(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, + std::tuple&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", + TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h new file mode 100644 index 00000000..72c75fa0 --- /dev/null +++ b/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,145 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. +// +// std::hash and std::equal_to are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. 
The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. That is: +// +// if hash_default_eq(a, b) returns true for any a and b of type T, then +// hash_default_hash(a) must equal hash_default_hash(b) +// +// For SwissTable containers this requirement is relaxed to allow a and b of +// any and possibly different types. Note that like the standard the hash and +// equal functions are still bound to T. This is important because some type U +// can be hashed by/tested for equality differently depending on T. A notable +// example is `const char*`. `const char*` is treated as a c-style string when +// the hash function is hash but as a pointer when the hash function is +// hash. +// +#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// The hash of an object of type T is computed by using absl::Hash. +template +struct HashEq { + using Hash = absl::Hash; + using Eq = std::equal_to; +}; + +struct StringHash { + using is_transparent = void; + + size_t operator()(absl::string_view v) const { + return absl::Hash{}(v); + } +}; + +// Supports heterogeneous lookup for string-like elements. +struct StringHashEq { + using Hash = StringHash; + struct Eq { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const { + return lhs == rhs; + } + }; +}; +template <> +struct HashEq : StringHashEq {}; +template <> +struct HashEq : StringHashEq {}; + +// Supports heterogeneous lookup for pointers and smart pointers. +template +struct HashEq { + struct Hash { + using is_transparent = void; + template + size_t operator()(const U& ptr) const { + return absl::Hash{}(HashEq::ToPtr(ptr)); + } + }; + struct Eq { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + + private: + static const T* ToPtr(const T* ptr) { return ptr; } + template + static const T* ToPtr(const std::unique_ptr& ptr) { + return ptr.get(); + } + template + static const T* ToPtr(const std::shared_ptr& ptr) { + return ptr.get(); + } +}; + +template +struct HashEq> : HashEq {}; +template +struct HashEq> : HashEq {}; + +// This header's visibility is restricted. If you need to access the default +// hasher please use the container's ::hasher alias instead. +// +// Example: typename Hash = typename absl::flat_hash_map::hasher +template +using hash_default_hash = typename container_internal::HashEq::Hash; + +// This header's visibility is restricted. If you need to access the default +// key equal please use the container's ::key_equal alias instead. 
+// +// Example: typename Eq = typename absl::flat_hash_map::key_equal +template +using hash_default_eq = typename container_internal::HashEq::Eq; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc new file mode 100644 index 00000000..4610843a --- /dev/null +++ b/absl/container/internal/hash_function_defaults_test.cc @@ -0,0 +1,303 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_function_defaults.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types; + +template +struct EqString : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_CASE(EqString, StringTypes); + +template +struct HashString : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_CASE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +struct NoDeleter { + template + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types, + std::unique_ptr, + std::unique_ptr, std::unique_ptr, + std::shared_ptr, std::shared_ptr>; + +template +struct EqPointer : ::testing::Test { + 
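+  // Transparent equality: raw, unique_, and shared_ptr keys can be compared.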
hash_default_eq key_eq; +}; + +TYPED_TEST_CASE(EqPointer, PointerTypes); + +template +struct HashPointer : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_CASE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); + + auto dp = std::make_shared(); + EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + hash_default_hash hasher; + hash_default_eq eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +// Cartesian product of (string, std::string, absl::string_view) +// with (string, std::string, absl::string_view, const char*). +using StringTypesCartesianProduct = Types< + // clang-format off + + std::pair, + std::pair, + std::pair, + + std::pair, + std::pair, + std::pair>; +// clang-format on + +constexpr char kFirstString[] = "abc123"; +constexpr char kSecondString[] = "ijk456"; + +template +struct StringLikeTest : public ::testing::Test { + typename T::first_type a1{kFirstString}; + typename T::second_type b1{kFirstString}; + typename T::first_type a2{kSecondString}; + typename T::second_type b2{kSecondString}; + hash_default_eq eq; + hash_default_hash hash; +}; + +TYPED_TEST_CASE_P(StringLikeTest); + +TYPED_TEST_P(StringLikeTest, Eq) { + EXPECT_TRUE(this->eq(this->a1, this->b1)); + EXPECT_TRUE(this->eq(this->b1, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, NotEq) { + EXPECT_FALSE(this->eq(this->a1, this->b2)); + EXPECT_FALSE(this->eq(this->b2, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, HashEq) { + EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); + EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); + // It would be a poor hash function which collides on these strings. 
+ EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); +} + +TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct); + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +enum Hash : size_t { + kStd = 0x2, // std::hash +#ifdef _MSC_VER + kExtension = kStd, // In MSVC, std::hash == ::hash +#else // _MSC_VER + kExtension = 0x4, // ::hash (GCC extension) +#endif // _MSC_VER +}; + +// H is a bitmask of Hash enumerations. +// Hashable is hashable via all means specified in H. +template +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template +struct hash> { + template , + class = typename std::enable_if::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +template +size_t Hash(const T& v) { + return hash_default_hash()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable())); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc new file mode 100644 index 00000000..aef41d72 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.cc @@ -0,0 +1,74 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/hash_generator_testing.h" + +#include + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_internal { +namespace { + +class RandomDeviceSeedSeq { + public: + using result_type = typename std::random_device::result_type; + + template + void generate(Iterator start, Iterator end) { + while (start != end) { + *start = gen_(); + ++start; + } + } + + private: + std::random_device gen_; +}; + +} // namespace + +std::mt19937_64* GetSharedRng() { + RandomDeviceSeedSeq seed_seq; + static auto* rng = new std::mt19937_64(seed_seq); + return rng; +} + +std::string Generator::operator()() const { + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + std::string res; + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +absl::string_view Generator::operator()() const { + static auto* arena = new std::deque(); + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + arena->emplace_back(); + auto& res = arena->back(); + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +} // namespace hash_internal +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h new file mode 100644 index 00000000..65e88964 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,152 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. 
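+//
+// A minimal usage sketch (illustrative only; relies on the Generator
+// specializations declared below):
+//
+//   auto s = hash_internal::Generator<std::string>()();  // random 32-char string
+//   auto p = hash_internal::Generator<std::pair<int, std::string>>()();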
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template +struct IsMap : std::false_type {}; + +template +struct IsMap> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetSharedRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast(ec); +} + +template +struct Generator; + +template +struct Generator::value>::type> { + T operator()() const { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } +}; + +template <> +struct Generator { + Enum operator()() const { + std::uniform_int_distribution::type> + dist; + while (true) { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + std::string operator()() const; +}; + +template <> +struct Generator { + absl::string_view operator()() const; +}; + +template <> +struct Generator { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator()()); + } +}; + +template +struct Generator> { + std::pair operator()() const { + return std::pair(Generator::type>()(), + Generator::type>()()); + } +}; + +template +struct Generator> { + std::tuple operator()() const { + return std::tuple(Generator::type>()()...); + } +}; + +template +struct Generator().key()), + decltype(std::declval().value())>> + : Generator().key())>::type, + typename std::decay().value())>::type>> {}; + +template +using GeneratedType = decltype( + std::declval::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +} // namespace hash_internal +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h new file mode 100644 index 00000000..9c310ad4 --- /dev/null +++ b/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,184 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_testing_internal { + +template +struct WithId { + WithId() : id_(next_id()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template + static size_t next_id() { + // 0 is reserved for moved from state. + static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template + size_t operator()(const T& t) const { + return absl::Hash{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template +struct Alloc : std::allocator { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. 
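+  // (It inherits std::allocator and spells out rebind and converting
+  // constructors instead of relying on allocator_traits defaults.)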
+ explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} + + template + struct rebind { + using other = Alloc; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = (std::numeric_limits::max)(); +}; + +template +auto items(const Map& m) -> std::vector< + std::pair> { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template +auto keys(const Set& s) + -> std::vector::type> { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ +( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc new file mode 100644 index 00000000..00c436b3 --- /dev/null +++ b/absl/container/internal/hash_policy_testing_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/hash_policy_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +TEST(_, Hash) { + StatefulTestingHash h1; + EXPECT_EQ(1, h1.id()); + StatefulTestingHash h2; + EXPECT_EQ(2, h2.id()); + StatefulTestingHash h1c(h1); + EXPECT_EQ(1, h1c.id()); + StatefulTestingHash h2m(std::move(h2)); + EXPECT_EQ(2, h2m.id()); + EXPECT_EQ(0, h2.id()); + StatefulTestingHash h3; + EXPECT_EQ(3, h3.id()); + h3 = StatefulTestingHash(); + EXPECT_EQ(4, h3.id()); + h3 = std::move(h1); + EXPECT_EQ(1, h3.id()); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h new file mode 100644 index 00000000..41e26212 --- /dev/null +++ b/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,191 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct hash_policy_traits { + private: + struct ReturnKey { + // We return `Key` here. + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + Key operator()(Key&& k, const Args&...) const { + return std::forward(k); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type {}; + + template + struct ConstantIteratorsImpl> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... 
args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static auto element(slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. 
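+// ReturnKey (above) forwards reference keys and returns value keys by value,
+// so the result never dangles.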
+ template + static auto key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc new file mode 100644 index 00000000..07cecdfa --- /dev/null +++ b/absl/container/internal/hash_policy_traits_test.cc @@ -0,0 +1,144 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/hash_policy_traits.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::Return; +using ::testing::ReturnRef; + +using Alloc = std::allocator; +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function construct; + static std::function destroy; + + static std::function element; + static int apply(int v) { return apply_impl(v); } + static std::function apply_impl; + static std::function value; +}; + +std::function PolicyWithoutOptionalOps::construct; +std::function PolicyWithoutOptionalOps::destroy; + +std::function PolicyWithoutOptionalOps::element; +std::function PolicyWithoutOptionalOps::apply_impl; +std::function PolicyWithoutOptionalOps::value; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function transfer; +}; + +std::function PolicyWithOptionalOps::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { + return apply.Call(a1); + }; + PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { + return value.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator alloc; + int a = 53; + + MockFunction construct; + MockFunction destroy; + + MockFunction element; + MockFunction apply; + MockFunction value; + + MockFunction transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + hash_policy_traits::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + hash_policy_traits::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::element(&a)); +} + +TEST_F(Test, apply) { + EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); + EXPECT_EQ(1337, (hash_policy_traits::apply(42))); +} + +TEST_F(Test, value) { + int b = 0; + EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::value(&a)); +} + +TEST_F(Test, without_transfer) { + int b = 42; + EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)); + EXPECT_CALL(destroy, Call(&alloc, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h new file mode 100644 index 00000000..b6a43512 --- /dev/null +++ b/absl/container/internal/hashtable_debug.h @@ -0,0 +1,110 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This library provides APIs to debug the probing behavior of hash tables. +// +// In general, the probing behavior is a black box for users and only the +// side effects can be measured in the form of performance differences. +// These APIs give a glimpse on the actual behavior of the probing algorithms in +// these hashtables given a specified hash function and a set of elements. +// +// The probe count distribution can be used to assess the quality of the hash +// function for that particular hash table. Note that a hash function that +// performs well in one hash table implementation does not necessarily performs +// well in a different one. +// +// This library supports std::unordered_{set,map}, dense_hash_{set,map} and +// absl::{flat,node,string}_hash_{set,map}. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ + +#include +#include +#include +#include + +#include "absl/container/internal/hashtable_debug_hooks.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// Returns the number of probes required to lookup `key`. Returns 0 for a +// search with no collisions. Higher values mean more hash collisions occurred; +// however, the exact meaning of this number varies according to the container +// type. +template +size_t GetHashtableDebugNumProbes( + const C& c, const typename C::key_type& key) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::GetNumProbes(c, key); +} + +// Gets a histogram of the number of probes for each elements in the container. +// The sum of all the values in the vector is equal to container.size(). +template +std::vector GetHashtableDebugNumProbesHistogram(const C& container) { + std::vector v; + for (auto it = container.begin(); it != container.end(); ++it) { + size_t num_probes = GetHashtableDebugNumProbes( + container, + absl::container_internal::hashtable_debug_internal::GetKey(*it, 0)); + v.resize(std::max(v.size(), num_probes + 1)); + v[num_probes]++; + } + return v; +} + +struct HashtableDebugProbeSummary { + size_t total_elements; + size_t total_num_probes; + double mean; +}; + +// Gets a summary of the probe count distribution for the elements in the +// container. +template +HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) { + auto probes = GetHashtableDebugNumProbesHistogram(container); + HashtableDebugProbeSummary summary = {}; + for (size_t i = 0; i < probes.size(); ++i) { + summary.total_elements += probes[i]; + summary.total_num_probes += probes[i] * i; + } + summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; + return summary; +} + +// Returns the number of bytes requested from the allocator by the container +// and not freed. 
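+// Requires a HashtableDebugAccess<C> specialization that implements
+// AllocatedByteSize (see hashtable_debug_hooks.h).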
+template +size_t AllocatedByteSize(const C& c) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); +} + +// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` +// and `c.size()` is equal to `num_elements`. +template +size_t LowerBoundAllocatedByteSize(size_t num_elements) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); +} + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 00000000..50ba6ba5 --- /dev/null +++ b/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,83 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// Containers should specialize this to provide debug information for that +// container. +template +struct HashtableDebugAccess { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. 
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
new file mode 100644
index 00000000..f11a6ad2
--- /dev/null
+++ b/absl/container/internal/layout.h
@@ -0,0 +1,740 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//                           MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+//   struct S {
+//     double a[N];
+//     int b[M];
+//   };
+//
+//   S* p = new S;
+//
+// But what if N and M are known only in run time? Class template Layout to the
+// rescue! It's a portable generalization of the technique known as struct hack.
+//
+//   // This object will tell us everything we need to know about the memory
+//   // layout of double[N] followed by int[M]. It's structurally identical to
+//   // size_t[2] that stores N and M. It's very cheap to create.
+//   const Layout<double, int> layout(N, M);
+//
+//   // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+//   // memory is needed. We are free to use any allocation function we want as
+//   // long as it returns aligned memory.
+//   std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+//   // Obtain the pointer to the array of doubles.
+//   // Equivalent to `reinterpret_cast<double*>(p.get())`.
+//   //
+//   // We could have written layout.Pointer<0>(p) instead. If all the types are
+//   // unique you can use either form, but if some types are repeated you must
+//   // use the index form.
+//   double* a = layout.Pointer<double>(p.get());
+//
+//   // Obtain the pointer to the array of ints.
+//   // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+//   int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+//   // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+//   using L = Layout<size_t, size_t, double, int>;
+//
+//   unsigned char* Allocate(size_t n, size_t m) {
+//     const L layout(1, 1, n, m);
+//     unsigned char* p = new unsigned char[layout.AllocSize()];
+//     *layout.Pointer<0>(p) = n;
+//     *layout.Pointer<1>(p) = m;
+//     return p;
+//   }
+//
+//   void Use(unsigned char* p) {
+//     // First, extract N and M.
+//     // Specify that the first array has only one element. Using `prefix` we
+//     // can access the first two arrays but not more.
+//     constexpr auto prefix = L::Partial(1);
+//     size_t n = *prefix.Pointer<0>(p);
+//     size_t m = *prefix.Pointer<1>(p);
+//
+//     // Now we can get pointers to the payload.
+//     const L layout(1, 1, n, m);
+//     double* a = layout.Pointer<double>(p);
+//     int* b = layout.Pointer<int>(p);
+//   }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+//                                  EXAMPLE
+//
+//   // Immutable move-only string with sizeof equal to sizeof(void*). The
+//   // string size and the characters are kept in the same heap allocation.
+//   class CompactString {
+//    public:
+//     CompactString(const char* s = "") {
+//       const size_t size = strlen(s);
+//       // size_t[1] followed by char[size + 1].
+//       const L layout(1, size + 1);
+//       p_.reset(new unsigned char[layout.AllocSize()]);
+//       // If running under ASAN, mark the padding bytes, if any, to catch
+//       // memory errors.
+//       layout.PoisonPadding(p_.get());
+//       // Store the size in the allocation.
+//       *layout.Pointer<size_t>(p_.get()) = size;
+//       // Store the characters in the allocation.
+//       memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+//     }
+//
+//     size_t size() const {
+//       // Equivalent to reinterpret_cast<size_t&>(*p).
+//       return *L::Partial().Pointer<size_t>(p_.get());
+//     }
+//
+//     const char* c_str() const {
+//       // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+//       // The argument in Partial(1) specifies that we have size_t[1] in front
+//       // of the characters.
+//       return L::Partial(1).Pointer<char>(p_.get());
+//     }
+//
+//    private:
+//     // Our heap allocation contains a size_t followed by an array of chars.
+//     using L = Layout<size_t, char>;
+//     std::unique_ptr<unsigned char[]> p_;
+//   };
+//
+//   int main() {
+//     CompactString s = "hello";
+//     assert(s.size() == 5);
+//     assert(strcmp(s.c_str(), "hello") == 0);
+//   }
+//
+//                               DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+//   isn't intended to be used directly, and its name and template parameter
+//   list are internal implementation details, but the class itself provides
+//   most of the functionality in this file. See comments on its members for
+//   detailed documentation.
+//
+// `Layout<T1,..., Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns
+// a `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
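
[Editor's sketch -- not part of this commit.] The efficiency tip above is easy
to verify with Layout itself. Assuming the usual LP64 sizes (sizeof(int) == 4,
sizeof(double) == alignof(double) == 8), the following compiles cleanly, and
the static_asserts show exactly where padding appears:

    #include "absl/container/internal/layout.h"

    using absl::container_internal::Layout;

    // double[2] then int[3]: alignments descend (8 >= 4), so the ints begin
    // exactly where the doubles end -- no padding.
    constexpr Layout<double, int> descending(2, 3);
    static_assert(descending.Offset<1>() == 16, "ints start at byte 16");
    static_assert(descending.AllocSize() == 28, "16 + 12 bytes, no padding");

    // int[3] then double[2]: the doubles must be re-aligned to 8 bytes, so
    // 4 bytes of padding follow the ints.
    constexpr Layout<int, double> ascending(3, 2);
    static_assert(ascending.Offset<1>() == 16, "12 bytes of ints + 4 padding");
    static_assert(ascending.AllocSize() == 32, "4 bytes larger than necessary");
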
+ +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef ADDRESS_SANITIZER +#include +#endif + +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// A type wrapper that instructs `Layout` to use the specific alignment for the +// array. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). +// +// Requires: `N >= alignof(T)` and `N` is a power of 2. +template +struct Aligned; + +namespace internal_layout { + +template +struct NotAligned {}; + +template +struct NotAligned> { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); +}; + +template +using IntToSize = size_t; + +template +using TypeToSize = size_t; + +template +struct Type : NotAligned { + using type = T; +}; + +template +struct Type> { + using type = T; +}; + +template +struct SizeOf : NotAligned, std::integral_constant {}; + +template +struct SizeOf> : std::integral_constant {}; + +// Note: workaround for https://gcc.gnu.org/PR88115 +template +struct AlignOf : NotAligned { + static constexpr size_t value = alignof(T); +}; + +template +struct AlignOf> { + static_assert(N % alignof(T) == 0, + "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; +}; + +// Does `Ts...` contain `T`? +template +using Contains = absl::disjunction...>; + +template +using CopyConst = + typename std::conditional::value, const To, To>::type; + +// Note: We're not qualifying this with absl:: because it doesn't compile under +// MSVC. +template +using SliceType = Span; + +// This namespace contains no types. It prevents functions defined in it from +// being found by ADL. +namespace adl_barrier { + +template +constexpr size_t Find(Needle, Needle, Ts...) { + static_assert(!Contains(), "Duplicate element type"); + return 0; +} + +template +constexpr size_t Find(Needle, T, Ts...) { + return adl_barrier::Find(Needle(), Ts()...) + 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. +// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. +constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } + +constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } + +constexpr size_t Max(size_t a) { return a; } + +template +constexpr size_t Max(size_t a, size_t b, Ts... rest) { + return adl_barrier::Max(b < a ? a : b, rest...); +} + +template +std::string TypeName() { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) { // Demangling succeeded. 
+ absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } else { +#if defined(__GXX_RTTI) || defined(_CPPRTTI) + absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#endif + } + return out; +} + +} // namespace adl_barrier + +template +using EnableIf = typename std::enable_if::type; + +// Can `T` be a template argument of `Layout`? +template +using IsLegalElementType = std::integral_constant< + bool, !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + +template +class LayoutImpl; + +// Public base class of `Layout` and the result type of `Layout::Partial()`. +// +// `Elements...` contains all template arguments of `Layout` that created this +// instance. +// +// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments +// passed to `Layout::Partial()` or `Layout::Layout()`. +// +// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is +// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we +// can compute offsets). +template +class LayoutImpl, absl::index_sequence, + absl::index_sequence> { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + enum { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), + "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() { + static_assert(Contains, Type::type>...>(), + "Type not found"); + return adl_barrier::Find(Type(), + Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) + : size_{sizes...} {} + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const { + return 0; + } + + template = 0> + constexpr size_t Offset() const { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>() * size_[N - 1], + ElementAlignment::value); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. 
+ template + constexpr size_t Offset() const { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. + constexpr std::array Sizes() const { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst>* Pointer(Char* p) const { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char"); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const { + return std::tuple>*...>( + Pointer(p)...); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. 
+ template + SliceType>> Slice(Char* p) const { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>>...> + Slices(Char* p) const { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)...); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>() * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ADDRESS_SANITIZER + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) { + size_t start = + Offset() + SizeOf>() * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout::Partial(5, 3); + // assert(x.DebugString() == + // "@0(1)[5]; @8(4)[3]; @24(8)"); + // + // Each field is in the following format: @offset(sizeof)[size] ( + // may be missing depending on the target platform). For example, + // @8(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". 
+ std::string DebugString() const { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf>()...}; + const std::string types[] = {adl_barrier::TypeName>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], + "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. + int last = static_cast(NumSizes) - 1; + if (NumTypes == NumSizes && last >= 0) { + absl::StrAppend(&res, "[", size_[last], "]"); + } + return res; + } + + private: + // Arguments of `Layout::Partial()` or `Layout::Layout()`. + size_t size_[NumSizes > 0 ? NumSizes : 1]; +}; + +template +using LayoutType = LayoutImpl< + std::tuple, absl::make_index_sequence, + absl::make_index_sequence>; + +} // namespace internal_layout + +// Descriptor of arrays of various types and sizes laid out in memory one after +// another. See the top of the file for documentation. +// +// Check out the public API of internal_layout::LayoutImpl above. The type is +// internal to the library but its methods are public, and they are inherited +// by `Layout`. +template +class Layout : public internal_layout::LayoutType { + public: + static_assert(sizeof...(Ts) > 0, "At least one field is required"); + static_assert( + absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + // The result type of `Partial()` with `NumSizes` arguments. + template + using PartialType = internal_layout::LayoutType; + + // `Layout` knows the element types of the arrays we want to lay out in + // memory but not the number of elements in each array. + // `Partial(size1, ..., sizeN)` allows us to specify the latter. The + // resulting immutable object can be used to obtain pointers to the + // individual arrays. + // + // It's allowed to pass fewer array sizes than the number of arrays. E.g., + // if all you need is to the offset of the second array, you only need to + // pass one argument -- the number of elements in the first array. + // + // // int[3] followed by 4 bytes of padding and an unknown number of + // // doubles. + // auto x = Layout::Partial(3); + // // doubles start at byte 16. + // assert(x.Offset<1>() == 16); + // + // If you know the number of elements in all arrays, you can still call + // `Partial()` but it's more convenient to use the constructor of `Layout`. + // + // Layout x(3, 5); + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + // + // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. + // Requires: all arguments are convertible to `size_t`. + template + static constexpr PartialType Partial(Sizes&&... sizes) { + static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); + return PartialType(absl::forward(sizes)...); + } + + // Creates a layout with the sizes of all arrays specified. If you know + // only the sizes of the first N arrays (where N can be zero), you can use + // `Partial()` defined above. The constructor is essentially equivalent to + // calling `Partial()` and passing in all array sizes; the constructor is + // provided as a convenient abbreviation. + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + constexpr explicit Layout(internal_layout::TypeToSize... sizes) + : internal_layout::LayoutType(sizes...) 
 {}
+};
+
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
new file mode 100644
index 00000000..b9f98471
--- /dev/null
+++ b/absl/container/internal/layout_test.cc
@@ -0,0 +1,1557 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/layout.h"
+
+// We need ::max_align_t because some libstdc++ versions don't provide
+// std::max_align_t
+#include <stddef.h>
+#include <cstdint>
+#include <memory>
+#include <sstream>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/types/span.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::absl::Span;
+using ::testing::ElementsAre;
+
+size_t Distance(const void* from, const void* to) {
+  ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+  return static_cast<const char*>(to) - static_cast<const char*>(from);
+}
+
+template <class Expected, class Actual>
+Expected Type(Actual val) {
+  static_assert(std::is_same<Expected, Actual>(), "");
+  return val;
+}
+
+// Helper class to test different sizes and alignments.
+struct alignas(8) Int128 {
+  uint64_t a, b;
+  friend bool operator==(Int128 lhs, Int128 rhs) {
+    return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b);
+  }
+
+  static std::string Name() {
+    return internal_layout::adl_barrier::TypeName<Int128>();
+  }
+};
+
+// Properties of types that this test relies on.
+static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template +void SameType() { + static_assert(std::is_same(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(3).Offset()); + EXPECT_EQ(0, L(3).Offset()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + 
EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(1).Offset()); + EXPECT_EQ(4, L::Partial(1).Offset()); + EXPECT_EQ(0, L::Partial(5).Offset()); + EXPECT_EQ(8, L::Partial(5).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3).Offset()); + EXPECT_EQ(8, L::Partial(5, 3).Offset()); + EXPECT_EQ(24, L::Partial(5, 3).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(0, L(5, 3, 1).Offset()); + EXPECT_EQ(24, L(5, 3, 1).Offset()); + EXPECT_EQ(8, L(5, 3, 1).Offset()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, SizeByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); 
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size()); + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L(3).Size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L::Partial(3, 5).Size()); + EXPECT_EQ(5, L::Partial(3, 5).Size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(3, L(3, 5, 7).Size()); + EXPECT_EQ(5, L(3, 5, 7).Size()); + EXPECT_EQ(7, L(3, 5, 7).Size()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, 
Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, 
Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + 
Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100]; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, 
L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, + Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, + Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, + Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, + Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + 
EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(24, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +TEST(Layout, MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + 
EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + } 
+} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element::type&>( + std::get(matchers_)) + .Matches(std::get(p))...}); + } + + std::tuple matchers_; +}; + +template +testing::PolymorphicMatcher> Tuple(M... matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + alignas(2 * M) unsigned char p[x.AllocSize()]; + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout::Alignment() == 1, ""); + 
static_assert(Layout::Alignment() == 4, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout>::Alignment() == 64, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); + } +#endif +} + +template +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + { + constexpr auto x = Layout::Partial(); + EXPECT_EQ("@0(1)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1); + EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2); + EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } + { + constexpr Layout x(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + 
alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type(x.Pointer<0>(c)); + Type(x.Pointer<0>(uc)); + Type(x.Pointer<0>(sc)); + Type(x.Pointer<0>(cc)); + Type(x.Pointer<0>(cuc)); + Type(x.Pointer<0>(csc)); + + Type(x.Pointer(c)); + Type(x.Pointer(uc)); + Type(x.Pointer(sc)); + Type(x.Pointer(cc)); + Type(x.Pointer(cuc)); + Type(x.Pointer(csc)); + + Type>(x.Pointers(c)); + Type>(x.Pointers(uc)); + Type>(x.Pointers(sc)); + Type>(x.Pointers(cc)); + Type>(x.Pointers(cuc)); + Type>(x.Pointers(csc)); + + Type>(x.Slice<0>(c)); + Type>(x.Slice<0>(uc)); + Type>(x.Slice<0>(sc)); + Type>(x.Slice<0>(cc)); + Type>(x.Slice<0>(cuc)); + Type>(x.Slice<0>(csc)); + + Type>>(x.Slices(c)); + Type>>(x.Slices(uc)); + Type>>(x.Slices(sc)); + Type>>(x.Slices(cc)); + Type>>(x.Slices(cuc)); + Type>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + const int32_t* p = reinterpret_cast(cc); + + EXPECT_EQ(alignof(int32_t), x.Alignment()); + + EXPECT_EQ(0, x.Offset<0>()); + EXPECT_EQ(0, x.Offset()); + + EXPECT_THAT(x.Offsets(), ElementsAre(0)); + + EXPECT_EQ(1, x.Size<0>()); + EXPECT_EQ(1, x.Size()); + + EXPECT_THAT(x.Sizes(), ElementsAre(1)); + + EXPECT_EQ(sizeof(int32_t), x.AllocSize()); + + EXPECT_EQ(p, Type(x.Pointer<0>(c))); + EXPECT_EQ(p, Type(x.Pointer<0>(cc))); + + EXPECT_EQ(p, Type(x.Pointer(c))); + EXPECT_EQ(p, Type(x.Pointer(cc))); + + EXPECT_THAT(Type>(x.Pointers(c)), Tuple(p)); + EXPECT_THAT(Type>(x.Pointers(cc)), Tuple(p)); + + EXPECT_THAT(Type>(x.Slice<0>(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice<0>(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>(x.Slice(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>>(x.Slices(c)), + Tuple(IsSameSlice(Span(p, 1)))); + EXPECT_THAT(Type>>(x.Slices(cc)), + Tuple(IsSameSlice(Span(p, 1)))); +} + +namespace example { + +// Immutable move-only string with sizeof equal to sizeof(void*). The string +// size and the characters are kept in the same heap allocation. +class CompactString { + public: + CompactString(const char* s = "") { // NOLINT + const size_t size = strlen(s); + // size_t[1], followed by char[size + 1]. + // This statement doesn't allocate memory. + const L layout(1, size + 1); + // AllocSize() tells us how much memory we need to allocate for all our + // data. + p_.reset(new unsigned char[layout.AllocSize()]); + // If running under ASAN, mark the padding bytes, if any, to catch memory + // errors. + layout.PoisonPadding(p_.get()); + // Store the size in the allocation. + // Pointer() is a synonym for Pointer<0>(). + *layout.Pointer(p_.get()) = size; + // Store the characters in the allocation. + memcpy(layout.Pointer(p_.get()), s, size + 1); + } + + size_t size() const { + // Equivalent to reinterpret_cast(*p). + return *L::Partial().Pointer(p_.get()); + } + + const char* c_str() const { + // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). + // The argument in Partial(1) specifies that we have size_t[1] in front of + // the characters. + return L::Partial(1).Pointer(p_.get()); + } + + private: + // Our heap allocation contains a size_t followed by an array of chars. 
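+  // For example, CompactString("hello") builds Layout<size_t, char>(1, 6):
+  // the size is stored at offset 0, the characters at offset sizeof(size_t),
+  // and AllocSize() == sizeof(size_t) + 6 (chars add no alignment padding).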
+ using L = Layout; + std::unique_ptr p_; +}; + +TEST(CompactString, Works) { + CompactString s = "hello"; + EXPECT_EQ(5, s.size()); + EXPECT_STREQ("hello", s.c_str()); +} + +} // namespace example + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h new file mode 100644 index 00000000..e8d89f63 --- /dev/null +++ b/absl/container/internal/node_hash_policy.h @@ -0,0 +1,90 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapts a policy for nodes. +// +// The node policy should model: +// +// struct Policy { +// // Returns a new node allocated and constructed using the allocator, using +// // the specified arguments. +// template +// value_type* new_element(Alloc* alloc, Args&&... args) const; +// +// // Destroys and deallocates node using the allocator. +// template +// void delete_element(Alloc* alloc, value_type* node) const; +// }; +// +// It may also optionally define `value()` and `apply()`. For documentation on +// these, see hash_policy_traits.h. + +#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ + +#include +#include +#include +#include +#include + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +struct node_hash_policy { + static_assert(std::is_lvalue_reference::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + *slot = Policy::new_element(alloc, std::forward(args)...); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::delete_element(alloc, *slot); + } + + template + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { + *new_slot = *old_slot; + } + + static size_t space_used(const slot_type* slot) { + if (slot == nullptr) return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) { return **slot; } + + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { + return P::apply(std::forward(ts)...); + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc new file mode 100644 index 00000000..a73c7bba --- /dev/null +++ b/absl/container/internal/node_hash_policy_test.cc @@ -0,0 +1,69 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/node_hash_policy.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_policy_traits.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::Pointee; + +struct Policy : node_hash_policy { + using key_type = int; + using init_type = int; + + template + static int* new_element(Alloc* alloc, int value) { + return new int(value); + } + + template + static void delete_element(Alloc* alloc, int* elem) { + delete elem; + } +}; + +using NodePolicy = hash_policy_traits; + +struct NodeTest : ::testing::Test { + std::allocator alloc; + int n = 53; + int* a = &n; +}; + +TEST_F(NodeTest, ConstructDestroy) { + NodePolicy::construct(&alloc, &a, 42); + EXPECT_THAT(a, Pointee(42)); + NodePolicy::destroy(&alloc, &a); +} + +TEST_F(NodeTest, transfer) { + int s = 42; + int* b = &s; + NodePolicy::transfer(&alloc, &a, &b); + EXPECT_EQ(&s, a); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h new file mode 100644 index 00000000..53d4619a --- /dev/null +++ b/absl/container/internal/raw_hash_map.h @@ -0,0 +1,187 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +class raw_hash_map : public raw_hash_set { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. 
+ template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = container_internal::KeyArg::value && + IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. + static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { + return insert_or_assign(k, v).first; + } + + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
<P> at(const key_arg<K>& key) {
+    // Unlike std::unordered_map::at, aborts rather than throwing when the
+    // key is absent.
+    auto it = this->find(key);
+    if (it == this->end()) std::abort();
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedConstReference
<P> at(const key_arg<K>& key) const {
+    auto it = this->find(key);
+    if (it == this->end()) std::abort();
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy, K* = nullptr>
+  MappedReference
<P> operator[](key_arg<K>&& key) {
+    return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference
<P>
operator[](const key_arg& key) { + return Policy::value(&*try_emplace(key).first); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc new file mode 100644 index 00000000..4e690dac --- /dev/null +++ b/absl/container/internal/raw_hash_set.cc @@ -0,0 +1,48 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include + +#include "absl/base/config.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +constexpr size_t Group::kWidth; + +// Returns "random" seed. +inline size_t RandomSeed() { +#if ABSL_HAVE_THREAD_LOCAL + static thread_local size_t counter = 0; + size_t value = ++counter; +#else // ABSL_HAVE_THREAD_LOCAL + static std::atomic counter(0); + size_t value = counter.fetch_add(1, std::memory_order_relaxed); +#endif // ABSL_HAVE_THREAD_LOCAL + return value ^ static_cast(reinterpret_cast(&counter)); +} + +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) { + // To avoid problems with weak hashes and single bit tests, we use % 13. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; +} + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h new file mode 100644 index 00000000..0c42e4ae --- /dev/null +++ b/absl/container/internal/raw_hash_set.h @@ -0,0 +1,1950 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// The table stores elements inline in a slot array. In addition to the slot +// array the table maintains some control state per slot. The extra state is one +// byte per slot and stores empty or deleted marks, or alternatively 7 bits from +// the hash of an occupied slot. The table is split into logical groups of +// slots, like so: +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// On lookup the hash is split into two parts: +// - H2: 7 bits (those stored in the control bytes) +// - H1: the rest of the bits +// The groups are probed using H1. For each group the slots are matched to H2 in +// parallel. Because H2 is 7 bits (128 states) and the number of slots per group +// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. +// +// On insert, once the right group is found (as in lookup), its slots are +// filled in order. +// +// On erase a slot is cleared. In case the group did not have any empty slots +// before the erase, the erased slot is marked as deleted. +// +// Groups without empty slots (but maybe with deleted slots) extend the probe +// sequence. The probing algorithm is quadratic. Given N the number of groups, +// the probing function for the i'th probe is: +// +// P(0) = H1 % N +// +// P(i) = (P(i - 1) + i) % N +// +// This probing function guarantees that after N probes, all the groups of the +// table will be probed exactly once. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ + +#ifndef SWISSTABLE_HAVE_SSE2 +#if defined(__SSE2__) || \ + (defined(_MSC_VER) && \ + (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2))) +#define SWISSTABLE_HAVE_SSE2 1 +#else +#define SWISSTABLE_HAVE_SSE2 0 +#endif +#endif + +#ifndef SWISSTABLE_HAVE_SSSE3 +#ifdef __SSSE3__ +#define SWISSTABLE_HAVE_SSSE3 1 +#else +#define SWISSTABLE_HAVE_SSSE3 0 +#endif +#endif + +#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2 +#error "Bad configuration!" 
+#endif + +#if SWISSTABLE_HAVE_SSE2 +#include +#endif + +#if SWISSTABLE_HAVE_SSSE3 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/bits.h" +#include "absl/base/internal/endian.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +class probe_seq { + public: + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index. The i-th probe in the probe sequence. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template +struct RequireUsableKey { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), + std::declval()))>* + operator()(const PassedKey&, const Args&...) const; +}; + +template +struct IsDecomposable : std::false_type {}; + +template +struct IsDecomposable< + absl::void_t(), + std::declval()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +template +struct IsTransparent : std::false_type {}; +template +struct IsTransparent> + : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. +template +constexpr bool IsNoThrowSwappable() { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); +} + +template +int TrailingZeros(T x) { + return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64( + static_cast(x)) + : base_internal::CountTrailingZerosNonZero32( + static_cast(x)); +} + +template +int LeadingZeros(T x) { + return sizeof(T) == 8 + ? base_internal::CountLeadingZeros64(static_cast(x)) + : base_internal::CountLeadingZeros32(static_cast(x)); +} + +// An abstraction over a bitmask. It provides an easy way to iterate through the +// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), +// this is a true bitmask. On non-SSE, platforms the arithematic used to +// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as +// either 0x00 or 0x80. +// +// For example: +// for (int i : BitMask(0x5)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask { + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + // These are useful for unit tests (gunit). 
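+  // For example (assuming the byte-oriented mask form described above):
+  //   EXPECT_THAT((BitMask<uint64_t, 8, 3>(0x0000000080800000)),
+  //               ElementsAre(2, 3));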
+ using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + explicit BitMask(T mask) : mask_(mask) {} + BitMask& operator++() { + mask_ &= (mask_ - 1); + return *this; + } + explicit operator bool() const { return mask_ != 0; } + int operator*() const { return LowestBitSet(); } + int LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + int HighestBitSet() const { + return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - + 1) >> + Shift; + } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + int TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + int LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } + + T mask_; +}; + +using ctrl_t = signed char; +using h2_t = uint8_t; + +// The values here are selected for maximum performance. See the static asserts +// below for details. +enum Ctrl : ctrl_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + kEmpty & kDeleted & kSentinel & 0x80, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). +inline ctrl_t* EmptyGroup() { + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast(empty_group); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); + +// Returns a hash seed. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. +inline size_t HashSeed(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. 
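+  // For example, with 4 KiB pages (1 << 12 bytes), tables whose control
+  // arrays land on different pages receive different seeds, which in turn
+  // perturbs H1 and thus the probe order.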
+ return reinterpret_cast(ctrl) >> 12; +} + +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + return (hash >> 7) ^ HashSeed(ctrl); +} +inline ctrl_t H2(size_t hash) { return hash & 0x7F; } + +inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= 0; } +inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } + +#if SWISSTABLE_HAVE_SSE2 + +// https://github.com/abseil/abseil-cpp/issues/209 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 +// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char +// Work around this by using the portable implementation of Group +// when using -funsigned-char under GCC. +inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); +} + +struct GroupSse2Impl { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const { + auto match = _mm_set1_epi8(hash); + return BitMask( + _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + } + + // Returns a bitmask representing the positions of empty slots. + BitMask MatchEmpty() const { +#if SWISSTABLE_HAVE_SSSE3 + // This only works because kEmpty is -128. + return BitMask( + _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); +#else + return Match(kEmpty); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. + BitMask MatchEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return BitMask( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return TrailingZeros( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#if SWISSTABLE_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; +#endif // SWISSTABLE_HAVE_SSE2 + +struct GroupPortableImpl { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) + : ctrl(little_endian::Load64(pos)) {} + + BitMask Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). 
+ // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on kEmpty, kDeleted, kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + BitMask MatchEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 6)) & msbs); + } + + BitMask MatchEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; + return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; + +#if SWISSTABLE_HAVE_SSE2 +using Group = GroupSse2Impl; +#else +using Group = GroupPortableImpl; +#endif + +template +class raw_hash_set; + +inline bool IsValidCapacity(size_t n) { + return ((n + 1) & n) == 0 && n >= Group::kWidth - 1; +} + +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == kSentinel +// ctrl[i] != kSentinel for all i < capacity +// Applies mapping for every byte in ctrl: +// DELETED -> EMPTY +// EMPTY -> EMPTY +// FULL -> DELETED +inline void ConvertDeletedToEmptyAndFullToDeleted( + ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); + ctrl[capacity] = kSentinel; +} + +// Rounds up the capacity to the next power of 2 minus 1 and ensures it is +// greater or equal to Group::kWidth - 1. +inline size_t NormalizeCapacity(size_t n) { + constexpr size_t kMinCapacity = Group::kWidth - 1; + return n <= kMinCapacity + ? kMinCapacity + : (std::numeric_limits::max)() >> LeadingZeros(n); +} + +// The node_handle concept from C++17. +// We specialize node_handle for sets and maps. node_handle_base holds the +// common API of both. 
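+//
+// For example, a set built on this table can move an element between two
+// containers without copying it (a sketch, assuming absl::flat_hash_set):
+//
+//   absl::flat_hash_set<std::string> src = {"abc"};
+//   absl::flat_hash_set<std::string> dst;
+//   auto node = src.extract("abc");  // the element leaves src in the handle
+//   dst.insert(std::move(node));     // and is adopted by dst without a copy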
+template +class node_handle_base { + protected: + using PolicyTraits = hash_policy_traits; + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() {} + node_handle_base(node_handle_base&& other) noexcept { + *this = std::move(other); + } + ~node_handle_base() { destroy(); } + node_handle_base& operator=(node_handle_base&& other) { + destroy(); + if (!other.empty()) { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept { return !alloc_; } + explicit operator bool() const noexcept { return !empty(); } + allocator_type get_allocator() const { return *alloc_; } + + protected: + template + friend class raw_hash_set; + + node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) { + PolicyTraits::transfer(alloc(), slot(), s); + } + + void destroy() { + if (!empty()) { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() { return std::addressof(*alloc_); } + + private: + absl::optional alloc_; + mutable absl::aligned_storage_t + slot_space_; +}; + +// For sets. +template +class node_handle : public node_handle_base { + using Base = typename node_handle::node_handle_base; + + public: + using value_type = typename Base::PolicyTraits::value_type; + + constexpr node_handle() {} + + value_type& value() const { + return Base::PolicyTraits::element(this->slot()); + } + + private: + template + friend class raw_hash_set; + + node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {} +}; + +// For maps. +template +class node_handle> + : public node_handle_base { + using Base = typename node_handle::node_handle_base; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() {} + + auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) { + return Base::PolicyTraits::key(this->slot()); + } + + mapped_type& mapped() const { + return Base::PolicyTraits::value( + &Base::PolicyTraits::element(this->slot())); + } + + private: + template + friend class raw_hash_set; + + node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {} +}; + +// Implement the insert_return_type<> concept of C++17. +template +struct insert_return_type { + Iterator position; + bool inserted; + NodeType node; +}; + +// Helper trait to allow or disallow arbitrary keys when the hash and +// eq functions are transparent. +// It is very important that the inner template is an alias and that the type it +// produces is not a dependent type. Otherwise, type deduction would fail. +template +struct KeyArg { + // Transparent. Forward `K`. + template + using type = K; +}; + +template <> +struct KeyArg { + // Not transparent. Always use `key_type`. + template + using type = key_type; +}; + +// Policy: a policy defines how to perform different operations on +// the slots of the hashtable (see hash_policy_traits.h for the full interface +// of policy). +// +// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The +// functor should accept a key and return size_t as hash. For best performance +// it is important that the hash function provides high entropy across all bits +// of the hash. 
+// +// Eq: a (possibly polymorphic) functor that compares two keys for equality. It +// should accept two (of possibly different type) keys and return a bool: true +// if they are equal, false if they are not. If two keys compare equal, then +// their hash values as defined by Hash MUST be equal. +// +// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which +// the storage of the hashtable will be allocated and the elements will be +// constructed and destroyed. +template +class raw_hash_set { + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = container_internal::KeyArg::value && + IsTransparent::value>; + + public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + + private: + // Give an early error when key_type is not hashable/eq. + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using Layout = absl::container_internal::Layout; + + static Layout MakeLayout(size_t capacity) { + assert(IsValidCapacity(capacity)); + return Layout(capacity + Group::kWidth + 1, capacity); + } + + using AllocTraits = absl::allocator_traits; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference + : std::is_same::type>::type, + typename std::remove_cv< + typename std::remove_reference::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. 
+ template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + + class iterator { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() {} + + // PRECONDITION: not an end() iterator. + reference operator*() const { return PolicyTraits::element(slot_); } + + // PRECONDITION: not an end() iterator. + pointer operator->() const { return &operator*(); } + + // PRECONDITION: not an end() iterator. + iterator& operator++() { + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end() + iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {} + + void skip_empty_or_deleted() { + while (IsEmptyOrDeleted(*ctrl_)) { + // ctrl is not necessarily aligned to Group::kWidth. It is also likely + // to read past the space for ctrl bytes and into slots. This is ok + // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there + // is no way to read outside the combined slot array. + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + } + + ctrl_t* ctrl_ = nullptr; + slot_type* slot_; + }; + + class const_iterator { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() {} + // Implicit construction from iterator. 
+ const_iterator(iterator i) : inner_(std::move(i)) {} + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) + : inner_(const_cast(ctrl), const_cast(slot)) {} + + iterator inner_; + }; + + using node_type = container_internal::node_handle; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value) {} + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { + if (bucket_count) { + capacity_ = NormalizeCapacity(bucket_count); + growth_left() = static_cast(capacity_ * kMaxLoadFactor); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(bucket_count, hash, eq, alloc) { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then copies + // // the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. 
+ template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + reserve(that.size()); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + const size_t i = find_first_non_full(hash); + set_ctrl(i, H2(hash)); + emplace_at(i, v); + } + size_ = that.size(); + growth_left() -= that.size(); + } + + raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value) + : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), + slots_(absl::exchange(that.slots_, nullptr)), + size_(absl::exchange(that.size_, 0)), + capacity_(absl::exchange(that.capacity_, 0)), + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. + settings_(that.settings_) { + // growth_left was copied above, reset the one from `that`. + that.growth_left() = 0; + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) + : ctrl_(EmptyGroup()), + slots_(nullptr), + size_(0), + capacity_(0), + settings_(0, that.hash_ref(), that.eq_ref(), a) { + if (a == that.alloc_ref()) { + std::swap(ctrl_, that.ctrl_); + std::swap(slots_, that.slots_); + std::swap(size_, that.size_); + std::swap(capacity_, that.capacity_); + std::swap(growth_left(), that.growth_left()); + } else { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. 
This can be fixed if it ever becomes an issue. + for (auto& elem : that) insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) { + raw_hash_set tmp(that, + AllocTraits::propagate_on_container_copy_assignment::value + ? that.alloc_ref() + : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value) { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment()); + } + + ~raw_hash_set() { destroy_slots(); } + + iterator begin() { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() { return {ctrl_ + capacity_}; } + + const_iterator begin() const { + return const_cast(this)->begin(); + } + const_iterator end() const { return const_cast(this)->end(); } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + bool empty() const { return !size(); } + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + size_t max_size() const { return (std::numeric_limits::max)(); } + + void clear() { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + if (capacity_ > 127) { + destroy_slots(); + } else if (capacity_) { + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + size_ = 0; + reset_ctrl(); + growth_left() = static_cast(capacity_ * kMaxLoadFactor); + } + assert(empty()); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. 
+ // + // flat_hash_set s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) { + return emplace(std::move(value)); + } + + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) insert(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(node.slot()); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*node.slot())}, elem); + if (res.second) { + node.reset(); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) { + return insert(std::move(node)).first; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls f with one argument of type raw_hash_set::constructor. f + // MUST call raw_hash_set::constructor with arguments as if a + // raw_hash_set::value_type is constructed, otherwise the behavior is + // undefined. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. 
If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) { + auto res = find_or_prepare_insert(key); + if (res.second) { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) { + auto it = find(key); + if (it == end()) return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). In + // order to erase while iterating across a map, use the following idiom (which + // also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // if () { + // m.erase(it++); + // } else { + // ++it; + // } + // } + void erase(const_iterator cit) { erase(cit.inner_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. + void erase(iterator it) { + assert(it != end()); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + + iterator erase(const_iterator first, const_iterator last) { + while (first != last) { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e; ++it) { + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, + PolicyTraits::element(it.slot_)) + .second) { + src.erase_meta_only(it); + } + } + } + + template + void merge(raw_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + node_type node(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + (!AllocTraits::propagate_on_container_swap::value || + IsNoThrowSwappable())) { + using std::swap; + swap(ctrl_, that.ctrl_); + swap(slots_, that.slots_); + swap(size_, that.size_); + swap(capacity_, that.capacity_); + swap(growth_left(), that.growth_left()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + if (AllocTraits::propagate_on_container_swap::value) { + swap(alloc_ref(), that.alloc_ref()); + } else { + // If the allocators do not compare equal it is officially undefined + // behavior. We choose to do nothing. 
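+      // Illustrative sketch of that undefined case, using the CheckedAlloc
+      // test allocator defined later in this patch (Spec == 0 disables all
+      // propagation):
+      //
+      //   CheckedAlloc<int, 0> a1(1), a2(2);  // a1 != a2, no propagation
+      //   Table t1(0, a1), t2(0, a2);
+      //   t1.swap(t2);  // undefined per [container.requirements]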
+ } + } + + void rehash(size_t n) { + if (n == 0 && capacity_ == 0) return; + if (n == 0 && size_ == 0) return destroy_slots(); + auto m = NormalizeCapacity(std::max(n, NumSlotsFast(size()))); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity_) { + resize(m); + } + } + + void reserve(size_t n) { + rehash(NumSlotsFast(n)); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + template + void prefetch(const key_arg& key) const { + (void)key; +#if defined(__GNUC__) + auto seq = probe(hash_ref()(key)); + __builtin_prefetch(static_cast(ctrl_ + seq.offset())); + __builtin_prefetch(static_cast(slots_ + seq.offset())); +#endif // __GNUC__ + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); + seq.next(); + } + } + template + iterator find(const key_arg& key) { + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const { + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity_; } + float load_factor() const { + return capacity_ ? static_cast(size()) / capacity_ : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. 
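+    // Unlike std::unordered_set, the real ceiling is the compile-time
+    // constant kMaxLoadFactor (14/16 below), so this knob is intentionally
+    // inert. Sketch of the observable behavior:
+    //
+    //   t.max_load_factor(0.5f);               // no effect
+    //   assert(t.max_load_factor() == 1.0f);   // reported value is fixed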
+ } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement { + template + size_t operator()(const K& key, Args&&...) const { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable { + template + std::pair operator()(const K& key, Args&&... args) const { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot { + template + std::pair operator()(const K& key, Args&&...) && { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } else if (do_destroy) { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil. + static inline size_t NumSlotsFast(size_t n) { + return static_cast( + (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) / + kMaxLoadFactorNumerator); + } + + // "erases" the object from the container, except that it doesn't actually + // destroy the object. It only updates all the metadata of the class. + // This can be used in conjunction with Policy::transfer to move the object to + // another place. + void erase_meta_only(const_iterator it) { + assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); + --size_; + const size_t index = it.inner_.ctrl_ - ctrl_; + const size_t index_before = (index - Group::kWidth) & capacity_; + const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + bool was_never_full = + empty_before && empty_after && + static_cast(empty_after.TrailingZeros() + + empty_before.LeadingZeros()) < Group::kWidth; + + set_ctrl(index, was_never_full ? 
kEmpty : kDeleted); + growth_left() += was_never_full; + } + + void initialize_slots() { + assert(capacity_); + auto layout = MakeLayout(capacity_); + char* mem = static_cast( + Allocate(&alloc_ref(), layout.AllocSize())); + ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); + slots_ = layout.template Pointer<1>(mem); + reset_ctrl(); + growth_left() = static_cast(capacity_ * kMaxLoadFactor) - size_; + } + + void destroy_slots() { + if (!capacity_) return; + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + auto layout = MakeLayout(capacity_); + // Unpoison before returning the memory to the allocator. + SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + Deallocate(&alloc_ref(), ctrl_, layout.AllocSize()); + ctrl_ = EmptyGroup(); + slots_ = nullptr; + size_ = 0; + capacity_ = 0; + growth_left() = 0; + } + + void resize(size_t new_capacity) { + assert(IsValidCapacity(new_capacity)); + auto* old_ctrl = ctrl_; + auto* old_slots = slots_; + const size_t old_capacity = capacity_; + capacity_ = new_capacity; + initialize_slots(); + + for (size_t i = 0; i != old_capacity; ++i) { + if (IsFull(old_ctrl[i])) { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(old_slots + i)); + size_t new_i = find_first_non_full(hash); + set_ctrl(new_i, H2(hash)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); + } + } + if (old_capacity) { + SanitizerUnpoisonMemoryRegion(old_slots, + sizeof(slot_type) * old_capacity); + auto layout = MakeLayout(old_capacity); + Deallocate(&alloc_ref(), old_ctrl, + layout.AllocSize()); + } + } + + void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { + assert(IsValidCapacity(capacity_)); + // Algorithm: + // - mark all DELETED slots as EMPTY + // - mark all FULL slots as DELETED + // - for each slot marked as DELETED + // hash = Hash(element) + // target = find_first_non_full(hash) + // if target is in the same group + // mark slot as FULL + // else if target is EMPTY + // transfer element to target + // mark slot as EMPTY + // mark target as FULL + // else if target is DELETED + // swap current element with target element + // mark target as FULL + // repeat procedure for current slot with moved from element (target) + ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + for (size_t i = 0; i != capacity_; ++i) { + if (!IsDeleted(ctrl_[i])) continue; + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(slots_ + i)); + size_t new_i = find_first_non_full(hash); + + // Verify if the old and new i fall within the same group wrt the hash. + // If they do, we don't need to move the object as it falls already in the + // best probe we can. + const auto probe_index = [&](size_t pos) { + return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth; + }; + + // Element doesn't move. + if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { + set_ctrl(i, H2(hash)); + continue; + } + if (IsEmpty(ctrl_[new_i])) { + // Transfer element to the empty spot. + // set_ctrl poisons/unpoisons the slots so we have to call it at the + // right time. 
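+        // Concretely: set_ctrl(new_i, ...) unpoisons the destination before
+        // transfer() writes to it, and set_ctrl(i, kEmpty) re-poisons the
+        // source only after it has been read.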
+ set_ctrl(new_i, H2(hash)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); + set_ctrl(i, kEmpty); + } else { + assert(IsDeleted(ctrl_[new_i])); + set_ctrl(new_i, H2(hash)); + // Until we are done rehashing, DELETED marks previously FULL slots. + // Swap i and new_i elements. + PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); + PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); + --i; // repeat + } + } + growth_left() = static_cast(capacity_ * kMaxLoadFactor) - size_; + } + + void rehash_and_grow_if_necessary() { + if (capacity_ == 0) { + resize(Group::kWidth - 1); + } else if (size() <= kMaxLoadFactor / 2 * capacity_) { + // Squash DELETED without growing if there is enough capacity. + drop_deletes_without_resize(); + } else { + // Otherwise grow the container. + resize(capacity_ * 2 + 1); + } + } + + bool has_element(const value_type& elem) const { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == + elem)) + return true; + } + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false; + seq.next(); + assert(seq.index() < capacity_ && "full table!"); + } + return false; + } + + // Probes the raw_hash_set with the probe sequence for hash and returns the + // pointer to the first empty or deleted slot. + // NOTE: this function must work with tables having both kEmpty and kDelete + // in one group. Such tables appears during drop_deletes_without_resize. + // + // This function is very useful when insertions happen and: + // - the input is already a set + // - there are enough slots + // - the element with the hash is not in the table + size_t find_first_non_full(size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + auto mask = g.MatchEmptyOrDeleted(); + if (mask) { +#if !defined(NDEBUG) + // We want to force small tables to have random entries too, so + // in debug build we will randomly insert in either the front or back of + // the group. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (ShouldInsertBackwards(hash, ctrl_)) + return seq.offset(mask.HighestBitSet()); + else + return seq.offset(mask.LowestBitSet()); +#else + return seq.offset(mask.LowestBitSet()); +#endif + } + assert(seq.index() < capacity_ && "full table!"); + seq.next(); + } + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. 
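+  // The two move_assign overloads below are chosen by tag dispatch on
+  // propagate_on_container_move_assignment: true_type adopts `that`'s
+  // storage and allocator wholesale; false_type rebuilds through a
+  // temporary holding our own allocator, which degrades to element-wise
+  // moves when the allocators compare unequal.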
+  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+    raw_hash_set tmp(std::move(that));
+    swap(tmp);
+    return *this;
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+    raw_hash_set tmp(std::move(that), alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+ protected:
+  template <class K>
+  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    auto hash = hash_ref()(key);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return {seq.offset(i), false};
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+      seq.next();
+    }
+    return {prepare_insert(hash), true};
+  }
+
+  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+    size_t target = find_first_non_full(hash);
+    if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(hash);
+    }
+    ++size_;
+    growth_left() -= IsEmpty(ctrl_[target]);
+    set_ctrl(target, H2(hash));
+    return target;
+  }
+
+  // Constructs the value in the space pointed by the iterator. This only
+  // works after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+  //
+  // PRECONDITION: i is an index returned from find_or_prepare_insert(k),
+  // where k is the key decomposed from `forward<Args>(args)...`, and the bool
+  // returned by find_or_prepare_insert(k) was true.
+  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+  template <class... Args>
+  void emplace_at(size_t i, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+                            std::forward<Args>(args)...);
+
+    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+               iterator_at(i) &&
+           "constructed value does not match the lookup key");
+  }
+
+  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+  const_iterator iterator_at(size_t i) const {
+    return {ctrl_ + i, slots_ + i};
+  }
+
+ private:
+  friend struct RawHashSetTestOnlyAccess;
+
+  probe_seq<Group::kWidth> probe(size_t hash) const {
+    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
+  }
+
+  // Reset all ctrl bytes back to kEmpty, except the sentinel.
+  void reset_ctrl() {
+    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+    ctrl_[capacity_] = kSentinel;
+    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+  }
+
+  // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte
+  // at the end too.
+  void set_ctrl(size_t i, ctrl_t h) {
+    assert(i < capacity_);
+
+    if (IsFull(h)) {
+      SanitizerUnpoisonObject(slots_ + i);
+    } else {
+      SanitizerPoisonObject(slots_ + i);
+    }
+
+    ctrl_[i] = h;
+    ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+  }
+
+  size_t& growth_left() { return settings_.template get<0>(); }
+
+  hasher& hash_ref() { return settings_.template get<1>(); }
+  const hasher& hash_ref() const { return settings_.template get<1>(); }
+  key_equal& eq_ref() { return settings_.template get<2>(); }
+  const key_equal& eq_ref() const { return settings_.template get<2>(); }
+  allocator_type& alloc_ref() { return settings_.template get<3>(); }
+  const allocator_type& alloc_ref() const {
+    return settings_.template get<3>();
+  }
+
+  // On average each group has two empty slots (for the vectorized case).
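+  // For the 16-wide SSE group this means a ceiling of 14/16 = 0.875:
+  // e.g. with capacity_ == 63 the growth threshold is
+  // static_cast<size_t>(63 * 0.875) == 55 full slots.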
+  static constexpr int64_t kMaxLoadFactorNumerator = 14;
+  static constexpr int64_t kMaxLoadFactorDenominator = 16;
+  static constexpr float kMaxLoadFactor =
+      1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
+
+  // TODO(alkis): Investigate removing some of these fields:
+  // - ctrl/slots can be derived from each other
+  // - size can be moved into the slot array
+  ctrl_t* ctrl_ = EmptyGroup();  // [(capacity + 1) * ctrl_t]
+  slot_type* slots_ = nullptr;   // [capacity * slot_type]
+  size_t size_ = 0;              // number of full slots
+  size_t capacity_ = 0;          // total number of slots
+  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+                                            key_equal, allocator_type>
+      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+  using Traits = typename Set::PolicyTraits;
+  using Slot = typename Traits::slot_type;
+
+  static size_t GetNumProbes(const Set& set,
+                             const typename Set::key_type& key) {
+    size_t num_probes = 0;
+    size_t hash = set.hash_ref()(key);
+    auto seq = set.probe(hash);
+    while (true) {
+      container_internal::Group g{set.ctrl_ + seq.offset()};
+      for (int i : g.Match(container_internal::H2(hash))) {
+        if (Traits::apply(
+                typename Set::template EqualElement<typename Set::key_type>{
+                    key, set.eq_ref()},
+                Traits::element(set.slots_ + seq.offset(i))))
+          return num_probes;
+        ++num_probes;
+      }
+      if (g.MatchEmpty()) return num_probes;
+      seq.next();
+      ++num_probes;
+    }
+  }
+
+  static size_t AllocatedByteSize(const Set& c) {
+    size_t capacity = c.capacity_;
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * c.size();
+    } else {
+      for (size_t i = 0; i != capacity; ++i) {
+        if (container_internal::IsFull(c.ctrl_[i])) {
+          m += Traits::space_used(c.slots_ + i);
+        }
+      }
+    }
+    return m;
+  }
+
+  static size_t LowerBoundAllocatedByteSize(size_t size) {
+    size_t capacity = container_internal::NormalizeCapacity(
+        std::ceil(size / Set::kMaxLoadFactor));
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * size;
+    }
+    return m;
+  }
+};
+
+}  // namespace hashtable_debug_internal
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
new file mode 100644
index 00000000..f5779d62
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -0,0 +1,430 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include +#include + +#include "gtest/gtest.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/container/internal/tracked.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +enum AllocSpec { + kPropagateOnCopy = 1, + kPropagateOnMove = 2, + kPropagateOnSwap = 4, +}; + +struct AllocState { + size_t num_allocs = 0; + std::set owned; +}; + +template +class CheckedAlloc { + public: + template + friend class CheckedAlloc; + + using value_type = T; + + CheckedAlloc() {} + explicit CheckedAlloc(size_t id) : id_(id) {} + CheckedAlloc(const CheckedAlloc&) = default; + CheckedAlloc& operator=(const CheckedAlloc&) = default; + + template + CheckedAlloc(const CheckedAlloc& that) + : id_(that.id_), state_(that.state_) {} + + template + struct rebind { + using other = CheckedAlloc; + }; + + using propagate_on_container_copy_assignment = + std::integral_constant; + + using propagate_on_container_move_assignment = + std::integral_constant; + + using propagate_on_container_swap = + std::integral_constant; + + CheckedAlloc select_on_container_copy_construction() const { + if (Spec & kPropagateOnCopy) return *this; + return {}; + } + + T* allocate(size_t n) { + T* ptr = std::allocator().allocate(n); + track_alloc(ptr); + return ptr; + } + void deallocate(T* ptr, size_t n) { + memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned. + track_dealloc(ptr); + return std::allocator().deallocate(ptr, n); + } + + friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { + return !(a == b); + } + + size_t num_allocs() const { return state_->num_allocs; } + + void swap(CheckedAlloc& that) { + using std::swap; + swap(id_, that.id_); + swap(state_, that.state_); + } + + friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } + + friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { + return o << "alloc(" << a.id_ << ")"; + } + + private: + void track_alloc(void* ptr) { + AllocState* state = state_.get(); + ++state->num_allocs; + if (!state->owned.insert(ptr).second) + ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; + } + void track_dealloc(void* ptr) { + if (state_->owned.erase(ptr) != 1) + ADD_FAILURE() << *this + << " deleting memory owned by another allocator: " << ptr; + } + + size_t id_ = std::numeric_limits::max(); + + std::shared_ptr state_ = std::make_shared(); +}; + +struct Identity { + int32_t operator()(int32_t v) const { return v; } +}; + +struct Policy { + using slot_type = Tracked; + using init_type = Tracked; + using key_type = int32_t; + + template + static void construct(allocator_type* alloc, slot_type* slot, + Args&&... 
args) { + std::allocator_traits::construct( + *alloc, slot, std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + template + static auto apply(F&& f, int32_t v) -> decltype(std::forward(f)(v, v)) { + return std::forward(f)(v, v); + } + + template + static auto apply(F&& f, const slot_type& v) + -> decltype(std::forward(f)(v.val(), v)) { + return std::forward(f)(v.val(), v); + } + + template + static auto apply(F&& f, slot_type&& v) + -> decltype(std::forward(f)(v.val(), std::move(v))) { + return std::forward(f)(v.val(), std::move(v)); + } + + static slot_type& element(slot_type* slot) { return *slot; } +}; + +template +struct PropagateTest : public ::testing::Test { + using Alloc = CheckedAlloc, Spec>; + + using Table = raw_hash_set, Alloc>; + + PropagateTest() { + EXPECT_EQ(a1, t1.get_allocator()); + EXPECT_NE(a2, t1.get_allocator()); + } + + Alloc a1 = Alloc(1); + Table t1 = Table(0, a1); + Alloc a2 = Alloc(2); +}; + +using PropagateOnAll = + PropagateTest; +using NoPropagateOnCopy = PropagateTest; +using NoPropagateOnMove = PropagateTest; + +TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } + +TEST_F(PropagateOnAll, InsertAllocates) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, InsertDecomposes) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); + + EXPECT_FALSE(t1.insert(0).second); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, RehashMoves) { + auto it = t1.insert(0).first; + EXPECT_EQ(0, it->num_moves()); + t1.rehash(2 * t1.capacity()); + EXPECT_EQ(2, a1.num_allocs()); + it = t1.find(0); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, u.get_allocator().num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); 
+ EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, 
a2); + u = std::move(t1); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, Swap) { + auto it = t1.insert(0).first; + Table u(0, a2); + u.swap(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(a2, t1.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc new file mode 100644 index 00000000..302f9758 --- /dev/null +++ b/absl/container/internal/raw_hash_set_test.cc @@ -0,0 +1,1830 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slots_) { + return c.slots_; + } +}; + +namespace { + +using ::testing::DoubleNear; +using ::testing::ElementsAre; +using ::testing::Optional; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +TEST(Util, NormalizeCapacity) { + constexpr size_t kMinCapacity = Group::kWidth - 1; + EXPECT_EQ(kMinCapacity, NormalizeCapacity(0)); + EXPECT_EQ(kMinCapacity, NormalizeCapacity(1)); + EXPECT_EQ(kMinCapacity, NormalizeCapacity(2)); + EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity)); + EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1)); + EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2)); +} + +TEST(Util, probe_seq) { + probe_seq<16> seq(0, 127); + auto gen = [&]() { + size_t res = seq.offset(); + seq.next(); + return res; + }; + std::vector offsets(8); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); + seq = probe_seq<16>(128, 127); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); +} + +TEST(BitMask, Smoke) { + EXPECT_FALSE((BitMask(0))); + EXPECT_TRUE((BitMask(5))); + + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); + EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); + 
EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); + EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); + EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); + EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); + EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); +} + +TEST(BitMask, WithShift) { + // See the non-SSE version of Group for details on what this math is for. + uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & msbs; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask b(mask); + EXPECT_EQ(*b, 2); +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask(0b0001101001000000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0b0001101001000000).TrailingZeros()), 6); + + EXPECT_EQ((BitMask(0b0000000000000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask(0b0000000000000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0b1000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0b1000000000000000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); +} + +TEST(Group, EmptyGroup) { + for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); +} + +TEST(Group, Match) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmpty) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmptyOrDeleted) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Batch, DropDeletes) { + constexpr size_t kCapacity = 63; + constexpr size_t kGroupWidth = container_internal::Group::kWidth; + std::vector ctrl(kCapacity + 1 + kGroupWidth); + 
ctrl[kCapacity] = kSentinel; + std::vector pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted}; + for (size_t i = 0; i != kCapacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + if (i < kGroupWidth - 1) + ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; + } + ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); + ASSERT_EQ(ctrl[kCapacity], kSentinel); + for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) { + ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; + if (i == kCapacity) expected = kSentinel; + if (expected == kDeleted) expected = kEmpty; + if (IsFull(expected)) expected = kDeleted; + EXPECT_EQ(ctrl[i], expected) + << i << " " << int{pattern[i % pattern.size()]}; + } +} + +TEST(Group, CountLeadingEmptyOrDeleted) { + const std::vector empty_examples = {kEmpty, kDeleted}; + const std::vector full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel}; + + for (ctrl_t empty : empty_examples) { + std::vector e(Group::kWidth, empty); + EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); + for (ctrl_t full : full_examples) { + for (size_t i = 0; i != Group::kWidth; ++i) { + std::vector f(Group::kWidth, empty); + f[i] = full; + EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + std::vector f(Group::kWidth, empty); + f[Group::kWidth * 2 / 3] = full; + f[Group::kWidth / 2] = full; + EXPECT_EQ( + Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + } +} + +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; + + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; + } + + static int64_t& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... 
args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : absl::Hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename IntTable::raw_hash_set; + IntTable() {} + using Base::Base; +}; + +struct BadFastHash { + template + size_t operator()(const T&) const { + return 0; + } +}; + +struct BadTable : raw_hash_set, + std::allocator> { + using Base = typename BadTable::raw_hash_set; + BadTable() {} + using Base::Base; +}; + +TEST(Table, EmptyFunctorOptimization) { + static_assert(std::is_empty>::value, ""); + static_assert(std::is_empty>::value, ""); + + struct MockTable { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; + }; + struct StatelessHash { + size_t operator()(absl::string_view) const { return 0; } + }; + struct StatefulHash : StatelessHash { + size_t dummy; + }; + + EXPECT_EQ( + sizeof(MockTable), + sizeof( + raw_hash_set, std::allocator>)); + + EXPECT_EQ( + sizeof(MockTable) + sizeof(StatefulHash), + sizeof( + raw_hash_set, std::allocator>)); +} + +TEST(Table, Empty) { + IntTable t; + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.empty()); +} + +#ifdef __GNUC__ +template +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) { + asm volatile("" : : "r,m"(v) : "memory"); +} +#endif + +TEST(Table, Prefetch) { + IntTable t; + t.emplace(1); + // Works for both present and absent keys. + t.prefetch(1); + t.prefetch(2); + + // Do not run in debug mode, when prefetch is not implemented, or when + // sanitizers are enabled. +#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \ + !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \ + !defined(UNDEFINED_BEHAVIOR_SANITIZER) + const auto now = [] { return absl::base_internal::CycleClock::Now(); }; + + // Make size enough to not fit in L2 cache (16.7 Mb) + static constexpr int size = 1 << 22; + for (int i = 0; i < size; ++i) t.insert(i); + + int64_t no_prefetch = 0, prefetch = 0; + for (int iter = 0; iter < 10; ++iter) { + int64_t time = now(); + for (int i = 0; i < size; ++i) { + DoNotOptimize(t.find(i)); + } + no_prefetch += now() - time; + + time = now(); + for (int i = 0; i < size; ++i) { + t.prefetch(i + 20); + DoNotOptimize(t.find(i)); + } + prefetch += now() - time; + } + + // no_prefetch is at least 30% slower. 
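+  // i.e. the prefetching pass must come out at least 1.3x faster: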
+ EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3); +#endif +} + +TEST(Table, LookupEmpty) { + IntTable t; + auto it = t.find(0); + EXPECT_TRUE(it == t.end()); +} + +TEST(Table, Insert1) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_THAT(*t.find(0), 0); +} + +TEST(Table, Insert2) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_TRUE(t.find(1) == t.end()); + res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(2, t.size()); + EXPECT_THAT(*t.find(0), 0); + EXPECT_THAT(*t.find(1), 1); +} + +TEST(Table, InsertCollision) { + BadTable t; + EXPECT_TRUE(t.find(1) == t.end()); + auto res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(1, t.size()); + + EXPECT_TRUE(t.find(2) == t.end()); + res = t.emplace(2); + EXPECT_THAT(*res.first, 2); + EXPECT_TRUE(res.second); + EXPECT_EQ(2, t.size()); + + EXPECT_THAT(*t.find(1), 1); + EXPECT_THAT(*t.find(2), 2); +} + +// Test that we do not add existent element in case we need to search through +// many groups with deleted elements +TEST(Table, InsertCollisionAndFindAfterDelete) { + BadTable t; // all elements go to the same group. + // Have at least 2 groups with Group::kWidth collisions + // plus some extra collisions in the last group. + constexpr size_t kNumInserts = Group::kWidth * 2 + 5; + for (size_t i = 0; i < kNumInserts; ++i) { + auto res = t.emplace(i); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, i); + EXPECT_EQ(i + 1, t.size()); + } + + // Remove elements one by one and check + // that we still can find all other elements. 
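+  // (Erased slots are marked kDeleted rather than kEmpty, so probe
+  // sequences running through them must still reach elements stored
+  // further along the chain.)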
+ for (size_t i = 0; i < kNumInserts; ++i) { + EXPECT_EQ(1, t.erase(i)) << i; + for (size_t j = i + 1; j < kNumInserts; ++j) { + EXPECT_THAT(*t.find(j), j); + auto res = t.emplace(j); + EXPECT_FALSE(res.second) << i << " " << j; + EXPECT_THAT(*res.first, j); + EXPECT_EQ(kNumInserts - i - 1, t.size()); + } + } + EXPECT_TRUE(t.empty()); +} + +TEST(Table, LazyEmplace) { + StringTable t; + bool called = false; + auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "ABC"); + }); + EXPECT_TRUE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); + called = false; + it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "DEF"); + }); + EXPECT_FALSE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); +} + +TEST(Table, ContainsEmpty) { + IntTable t; + + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains1) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + EXPECT_EQ(1, t.erase(0)); + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains2) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + t.clear(); + EXPECT_FALSE(t.contains(0)); +} + +int decompose_constructed; +struct DecomposeType { + DecomposeType(int i) : i(i) { // NOLINT + ++decompose_constructed; + } + + explicit DecomposeType(const char* d) : DecomposeType(*d) {} + + int i; +}; + +struct DecomposeHash { + using is_transparent = void; + size_t operator()(DecomposeType a) const { return a.i; } + size_t operator()(int a) const { return a; } + size_t operator()(const char* a) const { return *a; } +}; + +struct DecomposeEq { + using is_transparent = void; + bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + bool operator()(DecomposeType a, const char* b) const { return a.i == *b; } +}; + +struct DecomposePolicy { + using slot_type = DecomposeType; + using key_type = DecomposeType; + using init_type = DecomposeType; + + template + static void construct(void*, DecomposeType* slot, T&& v) { + *slot = DecomposeType(std::forward(v)); + } + static void destroy(void*, DecomposeType*) {} + static DecomposeType& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, const T& x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +template +void TestDecompose(bool construct_three) { + DecomposeType elem{0}; + const int one = 1; + const char* three_p = "3"; + const auto& three = three_p; + + raw_hash_set> set1; + + decompose_constructed = 0; + int expected_constructed = 0; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(elem); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(1); + EXPECT_EQ(++expected_constructed, decompose_constructed); + set1.emplace("3"); + EXPECT_EQ(++expected_constructed, decompose_constructed); + EXPECT_EQ(expected_constructed, decompose_constructed); + + { // insert(T&&) + set1.insert(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(const T&) + set1.insert(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, T&&) + set1.insert(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, const T&) + set1.insert(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // 
emplace(...) + set1.emplace(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace("3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // emplace_hint(...) + set1.emplace_hint(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), "3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } +} + +TEST(Table, Decompose) { + TestDecompose(false); + + struct TransparentHashIntOverload { + size_t operator()(DecomposeType a) const { return a.i; } + size_t operator()(int a) const { return a; } + }; + struct TransparentEqIntOverload { + bool operator()(DecomposeType a, DecomposeType b) const { + return a.i == b.i; + } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + }; + TestDecompose(true); + TestDecompose(true); + TestDecompose(true); +} + +// Returns the largest m such that a table with m elements has the same number +// of buckets as a table with n elements. +size_t MaxDensitySize(size_t n) { + IntTable t; + t.reserve(n); + for (size_t i = 0; i != n; ++i) t.emplace(i); + const size_t c = t.bucket_count(); + while (c == t.bucket_count()) t.emplace(n++); + return t.size() - 1; +} + +struct Modulo1000Hash { + size_t operator()(int x) const { return x % 1000; } +}; + +struct Modulo1000HashTable + : public raw_hash_set, + std::allocator> {}; + +// Test that rehash with no resize happen in case of many deleted slots. +TEST(Table, RehashWithNoResize) { + Modulo1000HashTable t; + // Adding the same length (and the same hash) strings + // to have at least kMinFullGroups groups + // with Group::kWidth collisions. Then fill up to MaxDensitySize; + const size_t kMinFullGroups = 7; + std::vector keys; + for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) { + int k = i * 1000; + t.emplace(k); + keys.push_back(k); + } + const size_t capacity = t.capacity(); + + // Remove elements from all groups except the first and the last one. + // All elements removed from full groups will be marked as kDeleted. + const size_t erase_begin = Group::kWidth / 2; + const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth; + for (size_t i = erase_begin; i < erase_end; ++i) { + EXPECT_EQ(1, t.erase(keys[i])) << i; + } + keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end); + + auto last_key = keys.back(); + size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key); + + // Make sure that we have to make a lot of probes for last key. + ASSERT_GT(last_key_num_probes, kMinFullGroups); + + int x = 1; + // Insert and erase one element, before inplace rehash happen. + while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) { + t.emplace(x); + ASSERT_EQ(capacity, t.capacity()); + // All elements should be there. 
+TEST(Table, RehashWithNoResize) {
+  Modulo1000HashTable t;
+  // Add keys that all share one hash so that we get at least kMinFullGroups
+  // groups with Group::kWidth collisions; then fill up to MaxDensitySize.
+  const size_t kMinFullGroups = 7;
+  std::vector<int> keys;
+  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+    int k = i * 1000;
+    t.emplace(k);
+    keys.push_back(k);
+  }
+  const size_t capacity = t.capacity();
+
+  // Remove elements from all groups except the first and the last one.
+  // All elements removed from full groups will be marked as kDeleted.
+  const size_t erase_begin = Group::kWidth / 2;
+  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+  for (size_t i = erase_begin; i < erase_end; ++i) {
+    EXPECT_EQ(1, t.erase(keys[i])) << i;
+  }
+  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+  auto last_key = keys.back();
+  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+  ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+  int x = 1;
+  // Insert and erase one element at a time until an in-place rehash happens.
+  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+    t.emplace(x);
+    ASSERT_EQ(capacity, t.capacity());
+    // All elements should still be there.
+    ASSERT_TRUE(t.find(x) != t.end()) << x;
+    for (const auto& k : keys) {
+      ASSERT_TRUE(t.find(k) != t.end()) << k;
+    }
+    t.erase(x);
+    ++x;
+  }
+}
+
+TEST(Table, InsertEraseStressTest) {
+  IntTable t;
+  const size_t kMinElementCount = 250;
+  std::deque<int> keys;
+  size_t i = 0;
+  for (; i < MaxDensitySize(kMinElementCount); ++i) {
+    t.emplace(i);
+    keys.push_back(i);
+  }
+  const size_t kNumIterations = 1000000;
+  for (; i < kNumIterations; ++i) {
+    ASSERT_EQ(1, t.erase(keys.front()));
+    keys.pop_front();
+    t.emplace(i);
+    keys.push_back(i);
+  }
+}
+
+TEST(Table, InsertOverloads) {
+  StringTable t;
+  // These should all trigger the insert(init_type) overload.
+  t.insert({{}, {}});
+  t.insert({"ABC", {}});
+  t.insert({"DEF", "!!!"});
+
+  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+                                      Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+  IntTable t;
+  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
+
+// Timeout if copy is quadratic as it was in Rust.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+  static const size_t kLargeSize = 1 << 15;
+
+  IntTable t;
+  for (size_t i = 0; i != kLargeSize; ++i) {
+    t.insert(i);
+  }
+
+  // If this is quadratic, the test will timeout.
+  IntTable t2;
+  for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+  IntTable t;
+  constexpr size_t capacity = container_internal::Group::kWidth - 1;
+  constexpr size_t max_size = capacity / 2;
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+  t.clear();
+  ASSERT_EQ(capacity, t.capacity());
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+  // We are checking that original and second are close enough to each other
+  // that they are probably still in the same group. This is not strictly
+  // guaranteed.
+  EXPECT_LT(std::abs(original - second),
+            capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.erase(res.first);
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+// Collect N bad keys by the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N elements.
+// 3. Append the first Group::kWidth - 1 of them to bad_keys.
+// 4. Clear the table without resize.
+// 5. Repeat from step 2 until N keys have been collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+  static constexpr int kGroupSize = Group::kWidth - 1;
+
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
+    for (size_t i = b; i != e; ++i) {
+      t->emplace(i);
+    }
+    std::vector<int64_t> res;
+    res.reserve(kGroupSize);
+    auto it = t->begin();
+    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+      res.push_back(*it);
+    }
+    return res;
+  };
+
+  std::vector<int64_t> bad_keys;
+  bad_keys.reserve(N);
+  IntTable t;
+  t.reserve(N * 2);
+
+  for (size_t b = 0; bad_keys.size() < N; b += N) {
+    auto keys = topk_range(b, b + N, &t);
+    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+    t.erase(t.begin(), t.end());
+    EXPECT_TRUE(t.empty());
+  }
+  return bad_keys;
+}
+
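+// Aggregated probe-length statistics: a histogram over all elements of all
+// tested tables, plus a total_probe_length/size ratio per table.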
+struct ProbeStats {
+  // Number of elements with specific probe length over all tested tables.
+  std::vector<size_t> all_probes_histogram;
+  // Ratios total_probe_length/size for every tested table.
+  std::vector<double> single_table_ratios;
+
+  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
+    ProbeStats res = a;
+    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
+                                             b.all_probes_histogram.size()));
+    std::transform(b.all_probes_histogram.begin(),
+                   b.all_probes_histogram.end(),
+                   res.all_probes_histogram.begin(),
+                   res.all_probes_histogram.begin(), std::plus<size_t>());
+    res.single_table_ratios.insert(res.single_table_ratios.end(),
+                                   b.single_table_ratios.begin(),
+                                   b.single_table_ratios.end());
+    return res;
+  }
+
+  // Average ratio total_probe_length/size over tables.
+  double AvgRatio() const {
+    return std::accumulate(single_table_ratios.begin(),
+                           single_table_ratios.end(), 0.0) /
+           single_table_ratios.size();
+  }
+
+  // Maximum ratio total_probe_length/size over tables.
+  double MaxRatio() const {
+    return *std::max_element(single_table_ratios.begin(),
+                             single_table_ratios.end());
+  }
+
+  // Percentile ratio total_probe_length/size over tables.
+  double PercentileRatio(double Percentile = 0.95) const {
+    auto r = single_table_ratios;
+    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+    if (mid != r.end()) {
+      std::nth_element(r.begin(), mid, r.end());
+      return *mid;
+    } else {
+      return MaxRatio();
+    }
+  }
+
+  // Maximum probe length over all elements and all tables.
+  size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+  // Fraction of elements with specified probe length.
+  std::vector<double> ProbeNormalizedHistogram() const {
+    double total_elements = std::accumulate(all_probes_histogram.begin(),
+                                            all_probes_histogram.end(), 0ull);
+    std::vector<double> res;
+    for (size_t p : all_probes_histogram) {
+      res.push_back(p / total_elements);
+    }
+    return res;
+  }
+
+  size_t PercentileProbe(double Percentile = 0.99) const {
+    size_t idx = 0;
+    for (double p : ProbeNormalizedHistogram()) {
+      if (Percentile > p) {
+        Percentile -= p;
+        ++idx;
+      } else {
+        return idx;
+      }
+    }
+    return idx;
+  }
+
+  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+        << ", PercentileRatio:" << s.PercentileRatio()
+        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+    for (double p : s.ProbeNormalizedHistogram()) {
+      out << p << ",";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+struct ExpectedStats {
+  double avg_ratio;
+  double max_ratio;
+  std::vector<std::pair<double, double>> percentile_ratios;
+  std::vector<std::pair<double, double>> percentile_probes;
+
+  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+        << ", PercentileRatios: [";
+    for (auto el : s.percentile_ratios) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "], PercentileProbes: [";
+    for (auto el : s.percentile_probes) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+                 const ProbeStats& stats) {
+  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+  for (auto pr : exp.percentile_ratios) {
+    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+
+  for (auto pr : exp.percentile_probes) {
+    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+}
+
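+// Observed ProbeStats keyed by the number of keys in the tested tables.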
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats on num_iters iterations of the following algorithm:
+// 1. Create a new table and reserve it to keys.size() * 2.
+// 2. Insert all keys xored with seed.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  const size_t reserve_size = keys.size() * 2;
+
+  ProbeStats stats;
+
+  int64_t seed = 0x71b1a19b907d6e33;
+  while (num_iters--) {
+    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+    IntTable t1;
+    t1.reserve(reserve_size);
+    for (const auto& key : keys) {
+      t1.emplace(key ^ seed);
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        keys.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
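+// The expected bounds below are empirical: they depend on Group::kWidth and
+// on whether inserts are effectively randomized (see kRandomizesInserts).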
+ExpectedStats XorSeedExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.05,
+                1.0,
+                {{0.95, 0.5}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      } else {
+        return {0.05,
+                2.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                1.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                1.0,
+                {{0.95, 0.05}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] =
+        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+  }
+  auto expected = XorSeedExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+// Collect total ProbeStats on num_iters iterations of the following algorithm:
+// 1. Create a new table.
+// 2. Select 10% of keys and insert 10 elements key * 17 + j * 13 for each.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  ProbeStats stats;
+
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+  while (num_iters--) {
+    IntTable t1;
+    size_t num_keys = keys.size() / 10;
+    size_t start = dist(rng);
+    for (size_t i = 0; i != num_keys; ++i) {
+      for (size_t j = 0; j != 10; ++j) {
+        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+      }
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        t1.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.15,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.4,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                0.2,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
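+// In the two tests below, comments such as "1 DELETED 3" show the expected
+// logical state of the probed slots after the preceding operations.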
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.max_load_factor(.321f);
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
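+// Like CopyConstruct above, exercise direct-, brace-, and copy-initialization
+// forms, this time for move construction.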
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(lf, u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;  // Avoid -Wself-assign.
+  EXPECT_EQ(1, t.size());
+  EXPECT_EQ(lf, t.max_load_factor());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(lf, u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // Construct over a deleted slot.
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
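+// Erase marks slots kDeleted rather than empty; reinserting the same key
+// should reuse such a slot in place instead of growing the table.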
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, std::allocator<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, std::allocator<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template