//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: optimization.h
// -----------------------------------------------------------------------------
//
// This header file defines portable macros for performance optimization.
#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_

#include <assert.h>  // needed by ABSL_INTERNAL_ASSUME when NDEBUG is not defined

#include "absl/base/config.h"

// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
//
// Instructs the compiler to avoid optimizing tail-call recursion. This macro
// is useful when you wish to preserve the existing function order within a
// stack trace for logging, debugging, or profiling purposes.
//
// Example:
//
//   int f() {
//     int result = g();
//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
//     return result;
//   }
#if defined(__pnacl__)
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#elif defined(__clang__)
// Clang will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(__GNUC__)
// GCC will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(_MSC_VER)
#include <intrin.h>
// The __nop() intrinsic blocks the optimization.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#endif
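//
// Usage sketch (illustrative addition, not part of the original header): a
// thin wrapper that should stay visible in stack traces taken while its
// callee runs. `HandleRequest` and `HandleRequestImpl` are hypothetical names.
//
//   int HandleRequest(int request_id) {
//     int result = HandleRequestImpl(request_id);
//     // Without the line below, the compiler may turn the call above into a
//     // tail call, so HandleRequest's frame would be missing from profiles
//     // and crash stacks collected inside HandleRequestImpl.
//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
//     return result;
//   }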
// ABSL_CACHELINE_SIZE
//
// Explicitly defines the size of the L1 cache for purposes of alignment.
// Setting the cacheline size allows you to specify that certain objects be
// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
// (See below.)
//
// NOTE: this macro should be replaced with the following C++17 features, when
// those are generally available:
//
// * `std::hardware_constructive_interference_size`
// * `std::hardware_destructive_interference_size`
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
#if defined(__GNUC__)
// Cache line alignment
#if defined(__i386__) || defined(__x86_64__)
#define ABSL_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
#define ABSL_CACHELINE_SIZE 128
#elif defined(__aarch64__)
// We would need to read special register ctr_el0 to find out L1 dcache size.
// This value is a good estimate based on a real aarch64 machine.
#define ABSL_CACHELINE_SIZE 64
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures. There
// are even implementations with cache line sizes configurable at boot
// time.
#if defined(__ARM_ARCH_5T__)
#define ABSL_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define ABSL_CACHELINE_SIZE 64
#endif
#endif
#ifndef ABSL_CACHELINE_SIZE
// A reasonable default guess. Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define ABSL_CACHELINE_SIZE 64
#endif
// ABSL_CACHELINE_ALIGNED
//
// Indicates that the declared object should be cache-aligned using
// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
// load a set of related objects in the L1 cache for performance improvements.
// Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing.
//
// NOTE: this macro should be replaced with usage of `alignas()` using
// `std::hardware_constructive_interference_size` and/or
// `std::hardware_destructive_interference_size` when available within C++17.
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
//
// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
// or `__declspec` attribute. For compilers where this is not known to work,
// the macro expands to nothing.
//
// No further guarantees are made here. The result of applying the macro
// to variables and types is always implementation-defined.
//
// WARNING: It is easy to use this attribute incorrectly, even to the point
// of causing bugs that are difficult to diagnose, crash, etc. It does not
// of itself guarantee that objects are aligned to a cache line.
//
// NOTE: Some compilers are picky about the locations of annotations such as
// this attribute, so prefer to put it at the beginning of your declaration.
// For example,
//
// ABSL_CACHELINE_ALIGNED static Foo* foo = ...
//
// class ABSL_CACHELINE_ALIGNED Bar { ...
//
// Recommendations:
//
// 1) Consult compiler documentation; this comment is not kept in sync as
// toolchains evolve.
// 2) Verify your use has the intended effect. This often requires inspecting
// the generated machine code.
// 3) Prefer applying this attribute to individual variables. Avoid
// applying it to types. This tends to localize the effect.
#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
#elif defined(_MSC_VER)
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
#else
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED
#endif
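//
// Usage sketch (illustrative addition, not part of the original header;
// assumes <atomic> and <cstdint> are available): two counters that are
// written from different threads each get their own cache line, so updates to
// one do not invalidate the line holding the other (no "false" sharing). Per
// recommendation 3 above, the attribute is applied to individual variables.
//
//   ABSL_CACHELINE_ALIGNED static std::atomic<int64_t> hit_count{0};
//   ABSL_CACHELINE_ALIGNED static std::atomic<int64_t> miss_count{0};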
// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
//
// Enables the compiler to prioritize compilation using static analysis for
// likely paths within a boolean branch.
//
// Example:
//
//   if (ABSL_PREDICT_TRUE(expression)) {
//     return result;  // Faster if more likely
//   } else {
//     return 0;
//   }
//
// Compilers can use the information that a certain branch is not likely to be
// taken (for instance, a CHECK failure) to optimize for the common case in
// the absence of better information (e.g., compiling gcc with `-fprofile-arcs`).
//
// Recommendation: Modern CPUs dynamically predict branch execution paths,
// typically with accuracy greater than 97%. As a result, annotating every
// branch in a codebase is likely counterproductive; however, annotating
// specific branches that are both hot and consistently mispredicted is likely
// to yield performance improvements.
#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
    (defined(__GNUC__) && !defined(__clang__))
#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
#else
#define ABSL_PREDICT_FALSE(x) (x)
#define ABSL_PREDICT_TRUE(x) (x)
#endif
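//
// Additional usage sketch (illustrative, not part of the original header):
// marking a rarely taken validation failure as unlikely so the common path is
// laid out as straight-line code. `kMaxInputSize` and `DoWork()` are
// hypothetical names.
//
//   bool Process(const std::string& input) {
//     if (ABSL_PREDICT_FALSE(input.size() > kMaxInputSize)) {
//       return false;  // cold path: oversized input
//     }
//     DoWork(input);   // hot path
//     return true;
//   }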
// ABSL_INTERNAL_ASSUME(cond)
// Informs the compiler that a condition is always true and that it can assume
// it to be true for optimization purposes. The call has undefined behavior if
// the condition is false.
// In !NDEBUG mode, the condition is checked with an assert().
// NOTE: The expression must not have side effects, as it will only be evaluated
// in some compilation modes and not others.
//
// Example:
//
//   int x = ...;
//   ABSL_INTERNAL_ASSUME(x >= 0);
//   // The compiler can optimize the division to a simple right shift using
//   // the assumption specified above.
//   int y = x / 16;
//
#if !defined(NDEBUG)
#define ABSL_INTERNAL_ASSUME(cond) assert(cond)
#elif ABSL_HAVE_BUILTIN(__builtin_assume)
#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
#define ABSL_INTERNAL_ASSUME(cond)          \
  do {                                      \
    if (!(cond)) __builtin_unreachable();   \
  } while (0)
#elif defined(_MSC_VER)
#define ABSL_INTERNAL_ASSUME(cond) __assume(cond)
#else
#define ABSL_INTERNAL_ASSUME(cond)          \
  do {                                      \
    static_cast<void>(false && (cond));     \
  } while (0)
#endif
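//
// Caveat sketch (illustrative addition, not part of the original header),
// expanding on the NOTE above: the condition is evaluated in some builds
// (assert() in !NDEBUG) and discarded in others, so it must not carry side
// effects. `ptr` and `refcount` are hypothetical variables.
//
//   ABSL_INTERNAL_ASSUME(ptr != nullptr);  // OK: no side effects
//   ABSL_INTERNAL_ASSUME(++refcount > 0);  // WRONG: the increment happens in
//                                          // some compilation modes but not
//                                          // in others.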
// ABSL_INTERNAL_UNIQUE_SMALL_NAME()
// This macro forces a small, unique name onto file-level static symbols such
// as static local variables or static functions. It is intended to be used in
// macro definitions to optimize the cost of the generated code. Do NOT use it
// on symbols exported from a translation unit, since it may cause a link-time
// conflict.
//
// Example:
//
//   #define MY_MACRO(txt)
//   namespace {
//   char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt;
//   const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
//   const char* VeryVeryLongFuncName() { return txt; }
//   }
//
#if defined(__GNUC__)
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x)
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \
  asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__))
#else
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME()
#endif
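//
// Expansion sketch (illustrative, not part of the original header): on
// GCC/Clang the two helper macros expand __COUNTER__ and then stringize it,
// so a declaration such as
//
//   const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
//
// becomes (for some counter value, here 7)
//
//   const char* VeryVeryLongFuncName() asm(".absl.7");
//
// which replaces the symbol's long mangled name with the short assembler
// name ".absl.7" in the emitted object file.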
#endif // ABSL_BASE_OPTIMIZATION_H_