|
|
|
// Copyright 2021 The Abseil Authors.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
#ifndef ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
|
|
|
|
#define ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
|
|
|
|
|
|
|
|
#include <atomic>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstddef>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <type_traits>
|
|
|
|
|
|
|
|
#include "absl/base/attributes.h"
|
|
|
|
#include "absl/base/config.h"
|
|
|
|
#include "absl/base/internal/endian.h"
|
|
|
|
#include "absl/base/internal/invoke.h"
|
|
|
|
#include "absl/base/optimization.h"
|
|
|
|
#include "absl/container/internal/compressed_tuple.h"
|
|
|
|
#include "absl/meta/type_traits.h"
|
|
|
|
#include "absl/strings/string_view.h"
|
|
|
|
|
|
|
|
namespace absl {
|
|
|
|
ABSL_NAMESPACE_BEGIN
|
|
|
|
namespace cord_internal {
|
|
|
|
|
|
|
|
// The overhead of a vtable is too much for Cord, so we roll our own subclasses
// using only a single byte to differentiate classes from each other - the "tag"
// byte. Define the subclasses first so we can provide downcasting helper
// functions in the base class.
struct CordRep;
struct CordRepConcat;
struct CordRepExternal;
struct CordRepFlat;
struct CordRepSubstring;
struct CordRepCrc;
class CordRepRing;
class CordRepBtree;

// Cordz sampling/profiling metadata; defined in the cordz internals.
class CordzInfo;
|
|
|
|
|
|
|
|
// Default feature enable states for cord ring buffers
enum CordFeatureDefaults {
  // Ring-buffer representation is opt-in; disabled unless explicitly enabled.
  kCordEnableRingBufferDefault = false,
  // Shallow subcords are opt-in; disabled unless explicitly enabled.
  kCordShallowSubcordsDefault = false
};
|
|
|
|
|
|
|
|
// Process-wide feature flags; defined in the corresponding .cc file.
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;

// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
// in debug assertions, and code that calls `IsValid()` explicitly. By default,
// assertions should be relatively cheap and AssertValid() can easily lead to
// O(n^2) complexity as recursive / full tree validation is O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;
|
|
|
|
|
|
|
|
// Turns the cord ring-buffer feature on or off at runtime.
// Relaxed ordering suffices: this is an advisory feature flag with no
// data dependencies on other state.
inline void enable_cord_ring_buffer(bool enable) {
  cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
|
|
|
|
|
|
|
|
// Turns the shallow-subcords feature on or off at runtime.
// Relaxed ordering suffices: this is an advisory feature flag with no
// data dependencies on other state.
inline void enable_shallow_subcords(bool enable) {
  shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
}
|
|
|
|
|
|
|
|
enum Constants {
  // The inlined size to use with absl::InlinedVector.
  //
  // Note: The InlinedVectors in this file (and in cord.h) do not need to use
  // the same value for their inlined size. The fact that they do is historical.
  // It may be desirable for each to use a different inlined size optimized for
  // that InlinedVector's usage.
  //
  // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
  // the inlined vector size (47 exists for backward compatibility).
  kInlinedVectorSize = 47,

  // Prefer copying blocks of at most this size, otherwise reference count.
  kMaxBytesToCopy = 511
};
|
|
|
|
|
|
|
|
// Emits a fatal error "Unexpected node type: xyz" and aborts the program.
|
|
|
|
ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
|
|
|
|
|
|
|
|
// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
 public:
  // Starts at a refcount of one (the creating owner's reference).
  constexpr RefcountAndFlags() : count_{kRefIncrement} {}
  // Tag type selecting the immortal constructor below.
  struct Immortal {};
  // Constructs an immortal refcount: the immortal bit is set and no
  // kRefIncrement is added, so Decrement() never reports "last reference".
  explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}

  // Increments the reference count. Imposes no memory ordering.
  inline void Increment() {
    count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
  }

  // Asserts that the current refcount is greater than 0. If the refcount is
  // greater than 1, decrements the reference count.
  //
  // Returns false if there are no references outstanding; true otherwise.
  // Inserts barriers to ensure that state written before this method returns
  // false will be visible to a thread that just observed this method returning
  // false. Always returns false when the immortal bit is set.
  inline bool Decrement() {
    // Fast path: if the (masked) count is exactly one reference, skip the
    // atomic RMW entirely and report "no references outstanding".
    int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement &&
           (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
            kRefcountMask) != kRefIncrement;
  }

  // Same as Decrement but expect that refcount is greater than 1.
  inline bool DecrementExpectHighRefcount() {
    int32_t refcount =
        count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
        kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement;
  }

  // Returns the current reference count using acquire semantics.
  inline size_t Get() const {
    return static_cast<size_t>(count_.load(std::memory_order_acquire) >>
                               kNumFlags);
  }

  // Returns whether the atomic integer is 1.
  // If the reference count is used in the conventional way, a
  // reference count of 1 implies that the current thread owns the
  // reference and no other thread shares it.
  // This call performs the test for a reference count of one, and
  // performs the memory barrier needed for the owning thread
  // to act on the object, knowing that it has exclusive access to the
  // object. Always returns false when the immortal bit is set.
  inline bool IsOne() {
    return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
           kRefIncrement;
  }

  // Returns true if this instance was constructed with the Immortal tag.
  bool IsImmortal() const {
    return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
  }

 private:
  // We reserve the bottom bits for flags.
  // kImmortalBit indicates that this entity should never be collected; it is
  // used for the StringConstant constructor to avoid collecting immutable
  // constant cords.
  // kReservedFlag is reserved for future use.
  enum Flags {
    kNumFlags = 2,

    kImmortalFlag = 0x1,
    kReservedFlag = 0x2,
    kRefIncrement = (1 << kNumFlags),

    // Bitmask to use when checking refcount by equality. This masks out
    // all flags except kImmortalFlag, which is part of the refcount for
    // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
    // if the immortal bit is set.)
    kRefcountMask = ~kReservedFlag,
  };

  std::atomic<int32_t> count_;
};
|
|
|
|
|
|
|
|
// Various representations that we allow
enum CordRepKind {
  UNUSED_0 = 0,
  SUBSTRING = 1,
  CRC = 2,
  BTREE = 3,
  RING = 4,
  EXTERNAL = 5,

  // We have different tags for different sized flat arrays,
  // starting with FLAT, and limited to MAX_FLAT_TAG. The below values map to an
  // allocated range of 32 bytes to 256 KB. The current granularity is:
  //   - 8 byte granularity for flat sizes in [32 - 512]
  //   - 64 byte granularity for flat sizes in (512 - 8KiB]
  //   - 4KiB byte granularity for flat sizes in (8KiB, 256 KiB]
  // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should
  // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still
  // represents the minimum flat allocation size. (32 bytes as of now).
  FLAT = 6,
  MAX_FLAT_TAG = 248
};
|
|
|
|
|
Export of internal Abseil changes
--
b1fc72630aaa81c8395c3b22ba267d938fe29a2e by Derek Mauro <dmauro@google.com>:
Fix -Wdeprecated-copy warnings from Clang 13.
Example:
error: definition of implicit copy assignment operator for 'UDT' is deprecated because it has a user-declared copy constructor [-Werror,-Wdeprecated-copy]
PiperOrigin-RevId: 380058303
--
0422744812b1a2010d9eea5b17fbe89f3441b66b by Evan Brown <ezb@google.com>:
Change the "full table!" asserts in raw_hash_set to use `<= capacity` instead of `< capacity`.
If we add support for non-power-of-two-minus-one capacities, this is the correct thing to assert. For example, consider: Group::kWidth = 8, capacity_ = 8, ctrl_ = {kEmpty, 1, 2, 3, 4, 5, 6, 7, kSentinel, kEmpty, 1, 2, 3, 4, 5, 6}. In this case, if we do an unsuccessful lookup with H2 mapping to slot 1, then the first Group will contain {1, 2, 3, 4, 5, 6, 7, kSentinel} so we need to continue to the second Group (at which point seq.index() == 8 == capacity_) to find a kEmpty.
Note: this is a no-op change for now since we never have `capacity % Group::kWidth == 0`.
PiperOrigin-RevId: 380033480
--
40628c34d540356de65fabb16c1439c0ec7a0764 by Abseil Team <absl-team@google.com>:
Drop out-of-date documentation about `absl::FixedArray`'s allocator support
PiperOrigin-RevId: 379811653
--
e7ad047863ae55c9b7aec0753cfc527a4ea614bc by Evan Brown <ezb@google.com>:
Fix a bug in ConvertDeletedToEmptyAndFullToDeleted in which we were copying 1 more cloned control byte than actually exists.
When alignof(slot_type)>1, this wouldn't cause a problem because the extra byte is padding.
Also change loop bounds to not rely on the fact that capacity_+1 is a multiple of Group::kWidth.
PiperOrigin-RevId: 379311830
--
1a3ba500fb2c33205854eb9258cd6e0fb1061bca by Martijn Vels <mvels@google.com>:
Change Ring, EXTERNAL and FLAT tag values to be consecutive values
The purpose of this change is to have FLAT = EXTERNAL + 1. Especially in the ring and btree alternative code, there is a common check if a node is a 'plain' edge (EXTERNAL or FLAT), or 'something else'. This change can make that check a single branch, i.e., instead of 'tag == EXTERNAL || tag >= FLAT', we can simply check for 'tag >= EXTERNAL'. Likewise we have some cases where we check for RING, EXTERNAL or FLAT, so we align RING + 1 with EXTERNAL.
PiperOrigin-RevId: 379291576
--
0c78e65ca4d85244b106c3f8e24cf268e09e72a3 by Benjamin Barenblat <bbaren@google.com>:
Round a double multiplication before casting it to integer
The code
static_cast<int>(x * y)
(for double x and y) performs a double multiplication into a temporary
that, by standard, may have excess precision. The subsequent cast to int
discards the excess precision. However, the cast may examine the excess
precision during conversion, producing surprising results like
static_cast<int>(1.7 * 10) == 16
on certain systems. Correct this case by explicitly rounding 1.7 * 10
before casting it.
PiperOrigin-RevId: 378922064
GitOrigin-RevId: b1fc72630aaa81c8395c3b22ba267d938fe29a2e
Change-Id: Ica708a006921118673e78d5fd2d61fe0fb0894d1
4 years ago
|
|
|
// There are various locations where we want to check if some rep is a 'plain'
|
|
|
|
// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
|
|
|
|
// can perform this check in a single branch as 'tag >= EXTERNAL'
|
|
|
|
// Likewise, we have some locations where we check for 'ring or external/flat',
|
|
|
|
// so likewise align RING to EXTERNAL.
|
|
|
|
// Note that we can leave this optimization to the compiler. The compiler will
|
|
|
|
// DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
|
Export of internal Abseil changes
--
1620e8ffaa93ef24510ca60c7fff2a07248ac9f6 by Abseil Team <absl-team@google.com>:
Update comment.
PiperOrigin-RevId: 382858259
--
20db116f28469149d10e0f7f8b976cb903dd4879 by Gennadiy Rozental <rogeeff@google.com>:
Add benchmark running on multiple flags.
Update size_tester to include cost of absl::GetFlag call.
Add size_tester invocation for bool flag.
New benchmark better represent GetFlag usage.
PiperOrigin-RevId: 382820341
--
2e097ad3811c4e329f75b98877a5e74c1d3d84fd by Abseil Team <absl-team@google.com>:
Avoid 64x64->128 multiplication in absl::Hash's mix on AArch64
On AArch64, calculating a 128-bit product is inefficient, because it requires a sequence of two instructions to calculate the upper and lower halves of the result. So calculate a 64-bit product instead.
Making MultType 64-bits means the upper 32 bits of the result do not participate in shift/xor, but the add/multiply gives us sufficient mixing.
PiperOrigin-RevId: 382625931
--
f3ae3f32cb53168c8dc91b766f2932dc87cec503 by Abseil Team <absl-team@google.com>:
Remove homegrown Round implementation
absl/time/duration.cc defined a Round implementation to accommodate old
versions of MSVC that lacked std::round(long double). Abseil no longer
supports those MSVCs, so we don’t need the homegrown implementation
anymore. Remove it, and replace calls to it with std::rint.
PiperOrigin-RevId: 382605191
--
a13631c91bf5478289e1a512ce215c85501a26f7 by Martijn Vels <mvels@google.com>:
Move the Consume() conversion functions out of cord_rep_ring into cord_rep_consume.
This makes these functions generic, so we can repurpose these for the new Btree conversion functions.
PiperOrigin-RevId: 382594902
--
7394c737500c2d8371fcf913b21ad1b321ba499d by Benjamin Barenblat <bbaren@google.com>:
Remove homegrown Round implementation
absl/time/duration.cc defined a Round implementation to accommodate old
versions of MSVC that lacked std::round(long double). Abseil no longer
supports those MSVCs, so we don’t need the homegrown implementation
anymore. Remove it, and replace calls to it with std::rint.
PiperOrigin-RevId: 382569900
--
d72a761f43dc5c9b9510c3a1363177ed26646b5d by Abseil Team <absl-team@google.com>:
Prefer `getentropy` for Emscripten.
It needs a different header, so I've separated it out from the GLIBC
check above.
PiperOrigin-RevId: 382332475
--
74e261dbb467741b2ddd8b490e04c531fdd2f559 by Martijn Vels <mvels@google.com>:
Add BTREE tag for CordRepNode implementing a Btree cord.
This change only forward declared the CordRepBtree class (not implemented yet) and defines the enum value BTREE. While RING and BTREE should never co-exist, we define a new value for BTREE so as not to make transitioning between RING and BTREE harder than it needs to be. This changes shifts the FLAT value / computation from FLAT = 4 to FLAT =5
PiperOrigin-RevId: 382326710
GitOrigin-RevId: 1620e8ffaa93ef24510ca60c7fff2a07248ac9f6
Change-Id: Ia8f99dde3874808f56062bd37ab3e63764099734
4 years ago
|
|
|
static_assert(RING == BTREE + 1, "BTREE and RING not consecutive");
|
|
|
|
static_assert(EXTERNAL == RING + 1, "BTREE and EXTERNAL not consecutive");
|
|
|
|
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
|
Export of internal Abseil changes
--
b1fc72630aaa81c8395c3b22ba267d938fe29a2e by Derek Mauro <dmauro@google.com>:
Fix -Wdeprecated-copy warnings from Clang 13.
Example:
error: definition of implicit copy assignment operator for 'UDT' is deprecated because it has a user-declared copy constructor [-Werror,-Wdeprecated-copy]
PiperOrigin-RevId: 380058303
--
0422744812b1a2010d9eea5b17fbe89f3441b66b by Evan Brown <ezb@google.com>:
Change the "full table!" asserts in raw_hash_set to use `<= capacity` instead of `< capacity`.
If we add support for non-power-of-two-minus-one capacities, this is the correct thing to assert. For example, consider: Group::kWidth = 8, capacity_ = 8, ctrl_ = {kEmpty, 1, 2, 3, 4, 5, 6, 7, kSentinel, kEmpty, 1, 2, 3, 4, 5, 6}. In this case, if we do an unsuccessful lookup with H2 mapping to slot 1, then the first Group will contain {1, 2, 3, 4, 5, 6, 7, kSentinel} so we need to continue to the second Group (at which point seq.index() == 8 == capacity_) to find a kEmpty.
Note: this is a no-op change for now since we never have `capacity % Group::kWidth == 0`.
PiperOrigin-RevId: 380033480
--
40628c34d540356de65fabb16c1439c0ec7a0764 by Abseil Team <absl-team@google.com>:
Drop out-of-date documentation about `absl::FixedArray`'s allocator support
PiperOrigin-RevId: 379811653
--
e7ad047863ae55c9b7aec0753cfc527a4ea614bc by Evan Brown <ezb@google.com>:
Fix a bug in ConvertDeletedToEmptyAndFullToDeleted in which we were copying 1 more cloned control byte than actually exists.
When alignof(slot_type)>1, this wouldn't cause a problem because the extra byte is padding.
Also change loop bounds to not rely on the fact that capacity_+1 is a multiple of Group::kWidth.
PiperOrigin-RevId: 379311830
--
1a3ba500fb2c33205854eb9258cd6e0fb1061bca by Martijn Vels <mvels@google.com>:
Change Ring, EXTERNAL and FLAT tag values to be consecutive values
The purpose of this change is to have FLAT = EXTERNAL + 1. Especially in the ring and btree alternative code, there is a common check if a node is a 'plain' edge (EXTERNAL or FLAT), or 'something else'. This change can make that check a single branch, i.e., instead of 'tag == EXTERNAL || tag >= FLAT', we can simply check for 'tag >= EXTERNAL'. Likewise we have some cases where we check for RING, EXTERNAL or FLAT, so we align RING + 1 with EXTERNAL.
PiperOrigin-RevId: 379291576
--
0c78e65ca4d85244b106c3f8e24cf268e09e72a3 by Benjamin Barenblat <bbaren@google.com>:
Round a double multiplication before casting it to integer
The code
static_cast<int>(x * y)
(for double x and y) performs a double multiplication into a temporary
that, by standard, may have excess precision. The subsequent cast to int
discards the excess precision. However, the cast may examine the excess
precision during conversion, producing surprising results like
static_cast<int>(1.7 * 10) == 16
on certain systems. Correct this case by explicitly rounding 1.7 * 10
before casting it.
PiperOrigin-RevId: 378922064
GitOrigin-RevId: b1fc72630aaa81c8395c3b22ba267d938fe29a2e
Change-Id: Ica708a006921118673e78d5fd2d61fe0fb0894d1
4 years ago
|
|
|
|
|
|
|
struct CordRep {
  // Result from an `extract edge` operation. Contains the (possibly changed)
  // tree node as well as the extracted edge, or {tree, nullptr} if no edge
  // could be extracted.
  // On success, the returned `tree` value is null if `extracted` was the only
  // data edge inside the tree, a data edge if there were only two data edges in
  // the tree, or the (possibly new / smaller) remaining tree with the extracted
  // data edge removed.
  struct ExtractResult {
    CordRep* tree;
    CordRep* extracted;
  };

  CordRep() = default;
  // Constexpr constructor for immortal (static storage duration) reps.
  // Currently used by the constexpr `CordRepExternal` constructor below,
  // hence the EXTERNAL tag.
  constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
      : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}

  // The following three fields have to be less than 32 bytes since
  // that is the smallest supported flat node size.
  size_t length;
  RefcountAndFlags refcount;
  // If tag < FLAT, it represents CordRepKind and indicates the type of node.
  // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
  uint8_t tag;

  // `storage` provides two main purposes:
  // - the starting point for FlatCordRep.Data() [flexible-array-member]
  // - 3 bytes of additional storage for use by derived classes.
  // The latter is used by CordRepConcat and CordRepBtree. CordRepConcat stores
  // a 'depth' value in storage[0], and the (future) CordRepBtree class stores
  // `height`, `begin` and `end` in the 3 entries. Otherwise we would need to
  // allocate room for these in the derived class, as not all compilers reuse
  // padding space from the base class (clang and gcc do, MSVC does not, etc)
  uint8_t storage[3];

  // Returns true if this instance's tag matches the requested type.
  constexpr bool IsRing() const { return tag == RING; }
  constexpr bool IsSubstring() const { return tag == SUBSTRING; }
  constexpr bool IsCrc() const { return tag == CRC; }
  constexpr bool IsExternal() const { return tag == EXTERNAL; }
  constexpr bool IsFlat() const { return tag >= FLAT; }
  constexpr bool IsBtree() const { return tag == BTREE; }

  // Checked downcasts to the concrete rep type. Each requires the
  // corresponding `IsXxx()` predicate above to hold (asserted in the
  // definitions further down this file).
  inline CordRepRing* ring();
  inline const CordRepRing* ring() const;
  inline CordRepSubstring* substring();
  inline const CordRepSubstring* substring() const;
  inline CordRepCrc* crc();
  inline const CordRepCrc* crc() const;
  inline CordRepExternal* external();
  inline const CordRepExternal* external() const;
  inline CordRepFlat* flat();
  inline const CordRepFlat* flat() const;
  inline CordRepBtree* btree();
  inline const CordRepBtree* btree() const;

  // --------------------------------------------------------------------
  // Memory management

  // Destroys the provided `rep`.
  static void Destroy(CordRep* rep);

  // Increments the reference count of `rep`.
  // Requires `rep` to be a non-null pointer value.
  static inline CordRep* Ref(CordRep* rep);

  // Decrements the reference count of `rep`. Destroys rep if count reaches
  // zero. Requires `rep` to be a non-null pointer value.
  static inline void Unref(CordRep* rep);
};
|
|
|
|
|
|
|
|
struct CordRepSubstring : public CordRep {
  size_t start;  // Starting offset of substring in child
  CordRep* child;

  // Creates a substring on `child`, adopting a reference on `child`.
  // Requires `child` to be either a flat or external node, and `pos` and `n` to
  // form a non-empty partial sub range of `child`, i.e.:
  // `n > 0 && n < length && n + pos <= length`
  static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n);

  // Creates a substring of `rep`. Does not adopt a reference on `rep`.
  // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`.
  // If `n == rep->length` then this method returns `CordRep::Ref(rep)`
  // If `rep` is a substring of a flat or external node, then this method will
  // return a new substring of that flat or external node with `pos` adjusted
  // with the original `start` position.
  static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n);
};
|
|
|
|
|
|
|
|
// Type for function pointer that will invoke the releaser function and also
// delete the `CordRepExternalImpl` corresponding to the passed in
// `CordRepExternal`.
using ExternalReleaserInvoker = void (*)(CordRepExternal*);
|
|
|
|
|
|
|
|
// External CordReps are allocated together with a type erased releaser. The
// releaser is stored in the memory directly following the CordRepExternal.
struct CordRepExternal : public CordRep {
  CordRepExternal() = default;
  // Constexpr constructor for immortal external reps wrapping a string
  // constant. The null `releaser_invoker` indicates there is nothing to
  // release.
  explicit constexpr CordRepExternal(absl::string_view str)
      : CordRep(RefcountAndFlags::Immortal{}, str.size()),
        base(str.data()),
        releaser_invoker(nullptr) {}

  const char* base;
  // Pointer to function that knows how to call and destroy the releaser.
  ExternalReleaserInvoker releaser_invoker;

  // Deletes (releases) the external rep.
  // Requires rep != nullptr and rep->IsExternal()
  static void Delete(CordRep* rep);
};
|
|
|
|
|
|
|
|
// Tag types used to rank the two `InvokeReleaser` overloads below: `Rank0`
// derives from `Rank1`, so an overload taking `Rank0` is preferred by
// overload resolution when both candidates are viable.
struct Rank1 {};
struct Rank0 : Rank1 {};
|
|
|
|
|
|
|
|
// Invokes `releaser(data)`. This overload is selected (via the Rank0 tag and
// SFINAE on invoke_result_t) when the releaser is invocable with an
// absl::string_view argument.
template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
                                 Releaser, absl::string_view>>
void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
}
|
|
|
|
|
|
|
|
// Invokes `releaser()`, ignoring the data argument. Fallback overload (Rank1
// tag) used when the releaser is not invocable with an absl::string_view.
template <typename Releaser,
          typename = ::absl::base_internal::invoke_result_t<Releaser>>
void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
}
|
|
|
|
|
|
|
|
// We use CompressedTuple so that we can benefit from EBCO.
template <typename Releaser>
struct CordRepExternalImpl
    : public CordRepExternal,
      public ::absl::container_internal::CompressedTuple<Releaser> {
  // The extra int arg is so that we can avoid interfering with copy/move
  // constructors while still benefiting from perfect forwarding.
  template <typename T>
  CordRepExternalImpl(T&& releaser, int)
      : CordRepExternalImpl::CompressedTuple(std::forward<T>(releaser)) {
    this->releaser_invoker = &Release;
  }

  // Invokes the stored releaser with the external data on destruction.
  ~CordRepExternalImpl() {
    InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
                   absl::string_view(base, length));
  }

  // Deletes the impl (running the releaser via the destructor above).
  // Installed into `releaser_invoker` by the constructor.
  static void Release(CordRepExternal* rep) {
    delete static_cast<CordRepExternalImpl*>(rep);
  }
};
|
|
|
|
|
|
|
|
// Creates a new SUBSTRING node covering `[pos, pos + n)` of `child`,
// adopting (not adding) a reference on `child`.
inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos,
                                                  size_t n) {
  // Preconditions: non-null child, and a non-empty, strict sub range.
  assert(child != nullptr);
  assert(n > 0);
  assert(n < child->length);
  assert(pos < child->length);
  assert(n <= child->length - pos);

  // TODO(b/217376272): Harden internal logic.
  // Move to strategical places inside the Cord logic and make this an assert.
  if (ABSL_PREDICT_FALSE(!child->IsExternal() && !child->IsFlat())) {
    LogFatalNodeType(child);
  }

  CordRepSubstring* substring = new CordRepSubstring();
  substring->length = n;
  substring->tag = SUBSTRING;
  substring->start = pos;
  substring->child = child;
  return substring;
}
|
|
|
|
|
|
|
|
// Returns a rep holding `[pos, pos + n)` of `rep` without adopting the
// caller's reference on `rep`.
inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos,
                                            size_t n) {
  assert(rep != nullptr);
  assert(n != 0);
  assert(pos < rep->length);
  assert(n <= rep->length - pos);

  // A full-length substring is simply another reference to `rep` itself.
  if (n == rep->length) return CordRep::Ref(rep);

  // Avoid nesting substrings: point directly at the underlying node,
  // shifting `pos` by the existing `start` offset.
  if (rep->IsSubstring()) {
    pos += rep->substring()->start;
    rep = rep->substring()->child;
  }

  CordRepSubstring* result = new CordRepSubstring();
  result->length = n;
  result->tag = SUBSTRING;
  result->start = pos;
  result->child = CordRep::Ref(rep);
  return result;
}
|
|
|
|
|
|
|
|
// Releases `rep` by calling its stored releaser invoker, which both runs the
// user releaser and deletes the impl. Requires an EXTERNAL rep.
inline void CordRepExternal::Delete(CordRep* rep) {
  assert(rep != nullptr && rep->IsExternal());
  CordRepExternal* external = static_cast<CordRepExternal*>(rep);
  assert(external->releaser_invoker != nullptr);
  external->releaser_invoker(external);
}
|
|
|
|
|
|
|
|
// Provides a constant-initialized, immortal CordRepExternal instance for the
// string constant supplied by `Str::value`, one per distinct `Str` type.
template <typename Str>
struct ConstInitExternalStorage {
  ABSL_CONST_INIT static CordRepExternal value;
};

template <typename Str>
ABSL_CONST_INIT CordRepExternal
    ConstInitExternalStorage<Str>::value(Str::value);
|
|
|
|
|
|
|
|
enum {
  // Maximum number of bytes that can be stored inline in `InlineData` below.
  kMaxInline = 15,
};
|
|
|
|
|
|
|
|
// Returns the character at `pos` in `data`, or NUL when `pos` is out of
// range. Used to build a fixed-size inline buffer from a short string.
constexpr char GetOrNull(absl::string_view data, size_t pos) {
  return pos >= data.size() ? '\0' : data[pos];
}
|
|
|
|
|
|
|
|
// We store cordz_info as 64 bit pointer value in big endian format. This
// guarantees that the least significant byte of cordz_info matches the last
// byte of the inline data representation in as_chars_, which holds the inlined
// size or the 'is_tree' bit.
using cordz_info_t = int64_t;

// Assert that the `cordz_info` pointer value perfectly overlaps the last half
// of `as_chars_` and can hold a pointer value.
static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");
|
|
|
|
|
|
|
|
// BigEndianByte() creates a big endian representation of 'value', i.e.: a big
// endian value where the last byte in the host's representation holds 'value`,
// with all other bytes being 0.
static constexpr cordz_info_t BigEndianByte(unsigned char value) {
#if defined(ABSL_IS_BIG_ENDIAN)
  // Big endian hosts already store the last byte in the lowest-order position.
  return value;
#else
  // Little endian: shift `value` into the most significant byte so that it
  // lands in the last byte of the in-memory representation.
  return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
#endif
}
|
|
|
|
|
|
|
|
class InlineData {
 public:
  // DefaultInitType forces the use of the default initialization constructor.
  enum DefaultInitType { kDefaultInit };

  // kNullCordzInfo holds the big endian representation of intptr_t(1)
  // This is the 'null' / initial value of 'cordz_info'. The null value
  // is specifically big endian 1 as with 64-bit pointers, the last
  // byte of cordz_info overlaps with the last byte holding the tag.
  static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);

  constexpr InlineData() : as_chars_{0} {}
  explicit InlineData(DefaultInitType) {}
  explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
  // Inline-data constructor: copies up to kMaxInline characters and encodes
  // the size (shifted left by one, low bit 0 = 'not a tree') in the last byte.
  explicit constexpr InlineData(absl::string_view chars)
      : as_chars_{
            GetOrNull(chars, 0),  GetOrNull(chars, 1),
            GetOrNull(chars, 2),  GetOrNull(chars, 3),
            GetOrNull(chars, 4),  GetOrNull(chars, 5),
            GetOrNull(chars, 6),  GetOrNull(chars, 7),
            GetOrNull(chars, 8),  GetOrNull(chars, 9),
            GetOrNull(chars, 10), GetOrNull(chars, 11),
            GetOrNull(chars, 12), GetOrNull(chars, 13),
            GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}

  // Returns true if the current instance is empty.
  // The 'empty value' is an inlined data value of zero length.
  bool is_empty() const { return tag() == 0; }

  // Returns true if the current instance holds a tree value.
  bool is_tree() const { return (tag() & 1) != 0; }

  // Returns true if the current instance holds a cordz_info value.
  // Requires the current instance to hold a tree value.
  bool is_profiled() const {
    assert(is_tree());
    return as_tree_.cordz_info != kNullCordzInfo;
  }

  // Returns true if either of the provided instances hold a cordz_info value.
  // This method is more efficient than the equivalent `data1.is_profiled() ||
  // data2.is_profiled()`. Requires both arguments to hold a tree.
  static bool is_either_profiled(const InlineData& data1,
                                 const InlineData& data2) {
    assert(data1.is_tree() && data2.is_tree());
    return (data1.as_tree_.cordz_info | data2.as_tree_.cordz_info) !=
           kNullCordzInfo;
  }

  // Returns the cordz_info sampling instance for this instance, or nullptr
  // if the current instance is not sampled and does not have CordzInfo data.
  // Requires the current instance to hold a tree value.
  CordzInfo* cordz_info() const {
    assert(is_tree());
    // Convert from the stored big endian form back to a host pointer value.
    intptr_t info = static_cast<intptr_t>(
        absl::big_endian::ToHost64(static_cast<uint64_t>(as_tree_.cordz_info)));
    // The low bit is always set on stored values (see set_cordz_info).
    assert(info & 1);
    return reinterpret_cast<CordzInfo*>(info - 1);
  }

  // Sets the current cordz_info sampling instance for this instance, or nullptr
  // if the current instance is not sampled and does not have CordzInfo data.
  // Requires the current instance to hold a tree value.
  void set_cordz_info(CordzInfo* cordz_info) {
    assert(is_tree());
    // Tag the pointer with the low 'is_tree' bit before storing big endian.
    uintptr_t info = reinterpret_cast<uintptr_t>(cordz_info) | 1;
    as_tree_.cordz_info =
        static_cast<cordz_info_t>(absl::big_endian::FromHost64(info));
  }

  // Resets the current cordz_info to null / empty.
  void clear_cordz_info() {
    assert(is_tree());
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Returns a read only pointer to the character data inside this instance.
  // Requires the current instance to hold inline data.
  const char* as_chars() const {
    assert(!is_tree());
    return as_chars_;
  }

  // Returns a mutable pointer to the character data inside this instance.
  // Should be used for 'write only' operations setting an inlined value.
  // Applications can set the value of inlined data either before or after
  // setting the inlined size, i.e., both of the below are valid:
  //
  //   // Set inlined data and inline size
  //   memcpy(data_.as_chars(), data, size);
  //   data_.set_inline_size(size);
  //
  //   // Set inlined size and inline data
  //   data_.set_inline_size(size);
  //   memcpy(data_.as_chars(), data, size);
  //
  // It's an error to read from the returned pointer without a preceding write
  // if the current instance does not hold inline data, i.e.: is_tree() == true.
  char* as_chars() { return as_chars_; }

  // Returns the tree value of this value.
  // Requires the current instance to hold a tree value.
  CordRep* as_tree() const {
    assert(is_tree());
    return as_tree_.rep;
  }

  // Initialize this instance to holding the tree value `rep`,
  // initializing the cordz_info to null, i.e.: 'not profiled'.
  void make_tree(CordRep* rep) {
    as_tree_.rep = rep;
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Set the tree value of this instance to `rep`.
  // Requires the current instance to already hold a tree value.
  // Does not affect the value of cordz_info.
  void set_tree(CordRep* rep) {
    assert(is_tree());
    as_tree_.rep = rep;
  }

  // Returns the size of the inlined character data inside this instance.
  // Requires the current instance to hold inline data.
  size_t inline_size() const {
    assert(!is_tree());
    return static_cast<size_t>(tag()) >> 1;
  }

  // Sets the size of the inlined character data inside this instance.
  // Requires `size` to be <= kMaxInline.
  // See the documentation on 'as_chars()' for more information and examples.
  void set_inline_size(size_t size) {
    ABSL_ASSERT(size <= kMaxInline);
    tag() = static_cast<char>(size << 1);
  }

 private:
  // See cordz_info_t for forced alignment and size of `cordz_info` details.
  struct AsTree {
    explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
        : rep(tree), cordz_info(kNullCordzInfo) {}
    // This union uses up extra space so that whether rep is 32 or 64 bits,
    // cordz_info will still start at the eighth byte, and the last
    // byte of cordz_info will still be the last byte of InlineData.
    union {
      absl::cord_internal::CordRep* rep;
      cordz_info_t unused_aligner;
    };
    cordz_info_t cordz_info;
  };

  // The tag byte is the last byte of the object, shared between the inline
  // representation (encoded size) and the tree representation (low byte of
  // the big endian cordz_info).
  char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
  char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }

  // If the data has length <= kMaxInline, we store it in `as_chars_`, and
  // store the size in the last char of `as_chars_` shifted left + 1.
  // Else we store it in a tree and store a pointer to that tree in
  // `as_tree_.rep` and store a tag in `tagged_size`.
  union {
    char as_chars_[kMaxInline + 1];
    AsTree as_tree_;
  };
};

static_assert(sizeof(InlineData) == kMaxInline + 1, "");
|
|
|
|
|
|
|
|
// Checked downcast to CordRepSubstring. Requires `IsSubstring()`.
inline CordRepSubstring* CordRep::substring() {
  assert(IsSubstring());
  return static_cast<CordRepSubstring*>(this);
}
|
|
|
|
|
|
|
|
// Const overload of the checked CordRepSubstring downcast above.
inline const CordRepSubstring* CordRep::substring() const {
  assert(IsSubstring());
  return static_cast<const CordRepSubstring*>(this);
}
|
|
|
|
|
|
|
|
// Checked downcast to CordRepExternal. Requires `IsExternal()`.
inline CordRepExternal* CordRep::external() {
  assert(IsExternal());
  return static_cast<CordRepExternal*>(this);
}
|
|
|
|
|
|
|
|
// Const overload of the checked CordRepExternal downcast above.
inline const CordRepExternal* CordRep::external() const {
  assert(IsExternal());
  return static_cast<const CordRepExternal*>(this);
}
|
|
|
|
|
|
|
|
// Increments the reference count of `rep` and returns it.
// Requires `rep` to be a non-null pointer value.
inline CordRep* CordRep::Ref(CordRep* rep) {
  // ABSL_ASSUME is a workaround for
  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585
  ABSL_ASSUME(rep != nullptr);
  rep->refcount.Increment();
  return rep;
}
|
|
|
|
|
|
|
|
// Decrements the reference count of `rep`, destroying it when the count
// reaches zero. Requires `rep` to be a non-null pointer value.
inline void CordRep::Unref(CordRep* rep) {
  assert(rep != nullptr);
  // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
  // typically outweigh the cost of an extra branch checking for ref == 1.
  if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
    Destroy(rep);
  }
}
|
|
|
|
|
|
|
|
} // namespace cord_internal
|
|
|
|
|
|
|
|
ABSL_NAMESPACE_END
|
|
|
|
} // namespace absl
|
|
|
|
#endif // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
|