Roll abseil_revision 231c393a17..f3489c9ca6
Change Log:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+log/231c393a17..f3489c9ca6
Full diff:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+/231c393a17..f3489c9ca6
Bug: None
Change-Id: I2a19ee77eb9ba6f6bee982d9e4dafa4a95ff71f8
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3545065
Auto-Submit: Mirko Bonadei <mbonadei@chromium.org>
Reviewed-by: Danil Chapovalov <danilchap@chromium.org>
Commit-Queue: Danil Chapovalov <danilchap@chromium.org>
Cr-Commit-Position: refs/heads/main@{#984261}
NOKEYCHECK=True
GitOrigin-RevId: ae9f8a56d550958a4a14036798bbeafaa8a15e5b
diff --git a/README.chromium b/README.chromium
index c51245c..4a64d5b 100644
--- a/README.chromium
+++ b/README.chromium
@@ -4,7 +4,7 @@
License: Apache 2.0
License File: LICENSE
Version: 0
-Revision: 231c393a170ce3c6d83e8456bc87fe917c333ecf
+Revision: f3489c9ca64e0fad2a263e8560ee96718ac8b21b
Security Critical: yes
Description:
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index 274054c..a01d612 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -20,7 +20,7 @@
#include "absl/base/config.h"
-#if ABSL_HAVE_MMAP
+#ifdef ABSL_HAVE_MMAP
#include <sys/mman.h>
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index 07f867a..a435140 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -47,7 +47,7 @@
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
- defined(_M_IX86) || defined(_M_X64)
+ defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index a22c9bd..da2abcb 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -631,7 +631,8 @@
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const size_type begin,
+ const size_type end) {
return begin == end ? begin
: SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 687bcb8..61bdb77 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -23,6 +23,8 @@
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 0daf722..d24bbe8 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -53,51 +53,125 @@
//
// IMPLEMENTATION DETAILS
//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+// struct BackingArray {
+// // Control bytes for the "real" slots.
+// ctrl_t ctrl[capacity];
+// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+// // stop and serves no other purpose.
+// ctrl_t sentinel;
+// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+// // that if a probe sequence picks a value near the end of `ctrl`,
+// // `Group` will have valid control bytes to look at.
+// ctrl_t clones[kWidth - 1];
+// // The actual slot data.
+// slot_type slots[capacity];
+// };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either a special value for empty slots, deleted slots
+// (sometimes called *tombstones*), and a special end-of-table marker used by
+// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
+// corresponding slot.
//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// # Table operations.
//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// The key operations are `insert`, `find`, and `erase_at`; the operations below
+// are implemented in terms of these.
//
-// P(0) = H1 % N
+// `insert` and `erase` are implemented in terms of find, so we describe that
+// one first. To `find` a value `x`, we compute `hash(x)`. From `H1(hash(x))`
+// and the capacity, we construct a `probe_seq` that visits every group of
+// slots in some interesting order.
//
-// P(i) = (P(i - 1) + i) % N
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
+// group, we stop and return an error. Each candidate slot `y` is compared with
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
+// next probe index. Tombstones effectively behave like full slots that never
+// match the value we're looking for.
//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// The `H2` bits ensure that if we perform a ==, a false positive is very very
+// rare (assuming the hash function looks enough like a random oracle). To see
+// this, note that in a group, there will be at most 8 or 16 `H2` values, but
+// an `H2` can be any one of 128 values. Viewed as a birthday attack, we can use
+// the rule of thumb that the probability of a collision among n choices of m
+// symbols is `p(n, m) ~ n^2/2m`. In table form:
//
-// The control state and slot array are stored contiguously in a shared heap
-// allocation. The layout of this allocation is: `capacity()` control bytes,
-// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
-// <possible padding>, `capacity()` slots. The sentinel control byte is used in
-// iteration so we know when we reach the end of the table. The cloned control
-// bytes at the end of the table are cloned from the beginning of the table so
-// groups that begin near the end of the table can see a full group. In cases in
-// which there are more than `capacity()` cloned control bytes, the extra bytes
-// are `kEmpty`, and these ensure that we always see at least one empty slot and
-// can stop an unsuccessful search.
+// n | p(n) | n | p(n)
+// 0 | 0.000 | 8 | 0.250
+// 1 | 0.004 | 9 | 0.316
+// 2 | 0.016 | 10 | 0.391
+// 3 | 0.035 | 11 | 0.473
+// 4 | 0.062 | 12 | 0.562
+// 5 | 0.098 | 13 | 0.660
+// 6 | 0.141 | 14 | 0.766
+// 7 | 0.191 | 15 | 0.879
+//
+// The rule of thumb breaks down at around `n = 12`, but such groups would only
+// occur for tables close to their load factor. This is far better than an
+// ordinary open-addressing table, which needs to perform an == at every step of
+// the probe sequence. These probabilities don't tell the full story (for
+// example, because elements are inserted into a group from the front, and
+// candidates are =='ed from the front, the collision is only effective in
+// rare cases e.g. another probe sequence inserted into a deleted slot in front
+// of us).
+//
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
+//
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
+//
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
+//
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its contents.
+// If we can prove that the slot would not appear in a probe sequence, we can
+// mark the slot as empty, instead. We can prove this by observing that if a
+// group has any empty slots, it has never been full (assuming we never create
+// an empty slot in a group with no empties, which this heuristic guarantees we
+// never do) and find would stop at this group anyways (since it does not probe
+// beyond groups with empties).
+//
+// `erase` is `erase_at` composed with `find`: if we
+// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
+// slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
@@ -142,14 +216,36 @@
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// is mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
template <size_t Width>
class probe_seq {
public:
+ // Creates a new probe sequence using `hash` as the initial value of the
+ // sequence and `mask` (usually the capacity of the table) as the mask to
+ // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
+
+ // The offset within the table, i.e., the value `p(i)` above.
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
@@ -158,7 +254,7 @@
offset_ += index_;
offset_ &= mask_;
}
- // 0-based probe index. The i-th probe in the probe sequence.
+ // 0-based probe index, a multiple of `Width`.
size_t index() const { return index_; }
private:
@@ -182,9 +278,9 @@
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
- absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
+ absl::void_t<decltype(Policy::apply(
+ RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -204,14 +300,20 @@
return static_cast<uint32_t>(countr_zero(x));
}
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+// An abstract bitmask, such as that emitted by a SIMD instruction.
+//
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
//
// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask {
@@ -219,7 +321,7 @@
static_assert(Shift == 0 || Shift == 3, "");
public:
- // These are useful for unit tests (gunit).
+ // BitMask is an iterator over the indices of its abstract bits.
using value_type = int;
using iterator = BitMask;
using const_iterator = BitMask;
@@ -231,20 +333,26 @@
}
explicit operator bool() const { return mask_ != 0; }
uint32_t operator*() const { return LowestBitSet(); }
- uint32_t LowestBitSet() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
- uint32_t HighestBitSet() const {
- return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
- }
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
+ // Returns the index of the lowest *abstract* bit set in `self`.
+ uint32_t LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ // Returns the index of the highest *abstract* bit set in `self`.
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ }
+
+ // Return the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+ // Return the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
@@ -265,8 +373,22 @@
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
-// below for details. We use an enum class so that when strict aliasing is
-// enabled, the compiler knows ctrl_t doesn't alias other types.
+// below for details.
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
@@ -299,10 +421,12 @@
"ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
+ // Const must be cast away here; no uses of this function will actually write
+ // to it, because it is only used for empty tables.
return const_cast<ctrl_t*>(kEmptyGroup);
}
@@ -310,28 +434,61 @@
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-// Returns a hash seed.
+// Returns a per-table hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
+ return (hash >> 7) ^ PerTableSalt(ctrl);
}
+
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }
+// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -377,9 +534,8 @@
// Returns a bitmask representing the positions of empty or deleted slots.
BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
- return BitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
+ return BitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
// Returns the number of trailing empty or deleted elements in the group.
@@ -464,26 +620,32 @@
using Group = GroupPortableImpl;
#endif
-// The number of cloned control bytes that we copy from the beginning to the
-// end of the control bytes array.
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+// Applies the following mapping to every byte in the control array:
+// * kDeleted -> kEmpty
+// * kEmpty -> kEmpty
+// * _ -> kDeleted
// PRECONDITION:
// IsValidCapacity(capacity)
// ctrl[capacity] == ctrl_t::kSentinel
// ctrl[i] != ctrl_t::kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
}
@@ -496,8 +658,8 @@
// never need to probe (the whole table fits in one group) so we don't need a
// load factor less than 1.
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -507,8 +669,12 @@
}
return capacity - capacity / 8;
}
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and requires NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on this.
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -555,37 +721,33 @@
size_t probe_length;
};
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
//
-// - In small mode only the first `capacity()` control bytes after the
-// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// `find_first_non_full()`, where we never try
+// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+// Begins a probing operation on `ctrl`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
-// NOTE: this function must work with tables having both ctrl_t::kEmpty and
-// ctrl_t::kDeleted in one group. Such tables appears during
-// drop_deletes_without_resize.
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
+// Behavior when the entire table is full is undefined.
+//
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
@@ -615,7 +777,8 @@
// corresponding translation unit.
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
+// array as empty.
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
size_t slot_size) {
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
@@ -624,8 +787,10 @@
SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
}
-// Sets the control byte, and if `i < NumClonedBytes()`, set the cloned byte
-// at the end too.
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
assert(i < capacity);
@@ -641,25 +806,28 @@
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
}
-// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
-// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
-// SlotOffset returns the offset of the slots into the allocated block.
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align) {
assert(IsValidCapacity(capacity));
const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
}
-// Returns the size of the allocated block. See also above comment.
+// Given the capacity of a table, computes the total size of the backing
+// array.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
+// A SwissTable.
+//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -812,6 +980,10 @@
ABSL_ASSUME(ctrl != nullptr);
}
+ // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_` until
+ // they reach one.
+ //
+ // If a sentinel is reached, we null both of them out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -1108,8 +1280,7 @@
// m.insert(std::make_pair("abc", 42));
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
// bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
@@ -1616,10 +1787,10 @@
slot_type&& slot;
};
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
+ // Erases, but does not destroy, the value pointed to by `it`.
+ //
+ // This merely updates the pertinent control byte. This can be used in
+ // conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
--size_;
@@ -1642,6 +1813,11 @@
infoz().RecordErase();
}
+ // Allocates a backing array for `self` and initializes its control bytes.
+ // This reads `capacity_` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // This does not free the currently held array; `capacity_` must be nonzero.
void initialize_slots() {
assert(capacity_);
// Folks with custom allocators often make unwarranted assumptions about the
@@ -1670,6 +1846,10 @@
infoz().RecordStorageChanged(size_, capacity_);
}
+ // Destroys all slots in the backing array, frees the backing array, and
+ // clears all top-level book-keeping data.
+ //
+ // This essentially implements `map = raw_hash_set();`.
void destroy_slots() {
if (!capacity_) return;
for (size_t i = 0; i != capacity_; ++i) {
@@ -1720,6 +1900,9 @@
infoz().RecordRehash(total_probe_length);
}
+ // Prunes control bytes to remove as many tombstones as possible.
+ //
+ // See the comment on `rehash_and_grow_if_necessary()`.
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
assert(!is_small(capacity_));
@@ -1786,6 +1969,11 @@
infoz().RecordRehash(total_probe_length);
}
+ // Called whenever the table *might* need to conditionally grow.
+ //
+ // This function is an optimization opportunity to perform a rehash even when
+ // growth is unnecessary, because vacating tombstones is beneficial for
+ // performance in the long-run.
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
@@ -1870,6 +2058,9 @@
}
protected:
+ // Attempts to find `key` in the table; if it isn't found, returns a slot that
+ // the value can be inserted into, with the control byte already set to
+ // `key`'s H2.
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
@@ -1890,6 +2081,10 @@
return {prepare_insert(hash), true};
}
+ // Given the hash of a value not currently in the table, finds the next
+ // viable slot index to insert it at.
+ //
+ // REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
@@ -1933,12 +2128,22 @@
growth_left() = CapacityToGrowth(capacity()) - size_;
}
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // This is stored separately due to tombstones: we do not include tombstones
+ // in the growth capacity, because we'd like to rehash when the table is
+ // otherwise filled with tombstones: otherwise, probe sequences might get
+ // unacceptably long without triggering a rehash. Callers can also force a
+ // rehash via the standard `rehash(0)`, which will recompute this value as a
+ // side-effect.
+ //
+ // See `CapacityToGrowth()`.
size_t& growth_left() { return settings_.template get<0>(); }
+ // Prefetch the heap-allocated memory region to resolve potential TLB misses.
+ // This is intended to overlap with execution of calculating the hash for a
+ // key.
void prefetch_heap_block() const {
- // Prefetch the heap-allocated memory region to resolve potential TLB
- // misses. This is intended to overlap with execution of calculating the
- // hash for a key.
#if defined(__GNUC__)
__builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
#endif // __GNUC__
@@ -1958,10 +2163,21 @@
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
+
+ // The control bytes (and, also, a pointer to the base of the backing array).
+ //
+ // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ ctrl_t* ctrl_ = EmptyGroup();
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `ctrl_`. May be null for empty tables.
+ slot_type* slots_ = nullptr;
+
+ // The number of filled slots.
+ size_t size_ = 0;
+
+ // The total number of available slots.
+ size_t capacity_ = 0;
absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
key_equal, allocator_type>
diff --git a/absl/debugging/internal/examine_stack.cc b/absl/debugging/internal/examine_stack.cc
index 81d216f..5bdd341 100644
--- a/absl/debugging/internal/examine_stack.cc
+++ b/absl/debugging/internal/examine_stack.cc
@@ -20,7 +20,13 @@
#include <unistd.h>
#endif
-#ifdef __APPLE__
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#if defined(__linux__) || defined(__APPLE__)
#include <sys/ucontext.h>
#endif
@@ -38,7 +44,102 @@
namespace debugging_internal {
namespace {
+constexpr int kDefaultDumpStackFramesLimit = 64;
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr;
+
+// Async-signal safe mmap allocator.
+void* Allocate(size_t num_bytes) {
+#ifdef ABSL_HAVE_MMAP
+ void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ return p == MAP_FAILED ? nullptr : p;
+#else
+ (void)num_bytes;
+ return nullptr;
+#endif // ABSL_HAVE_MMAP
+}
+
+void Deallocate(void* p, size_t size) {
+#ifdef ABSL_HAVE_MMAP
+ ::munmap(p, size);
+#else
+ (void)p;
+ (void)size;
+#endif // ABSL_HAVE_MMAP
+}
+
+// Print a program counter only.
+void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc,
+ int framesize, const char* const prefix) {
+ char buf[100];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
+ kPrintfPointerFieldWidth, pc);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize);
+ }
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding symbol.
+void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ // Symbolizes the previous address of pc because pc may be in the
+ // next function. The overrun happens when the function ends with
+ // a call to a function annotated noreturn (e.g. CHECK).
+ // If symbolization of pc-1 fails, also try pc on the off-chance
+ // that we crashed on the first instruction of a function (that
+ // actually happens very often for e.g. __restore_rt).
+ const uintptr_t prev_pc = reinterpret_cast<uintptr_t>(pc) - 1;
+ if (absl::Symbolize(reinterpret_cast<const char*>(prev_pc), tmp,
+ sizeof(tmp)) ||
+ absl::Symbolize(pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix, kPrintfPointerFieldWidth,
+ pc, symbol);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
+void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg,
+ void* const pc, void* const symbolize_pc,
+ int framesize, const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, symbol);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize, symbol);
+ }
+ writer(buf, writer_arg);
+}
+
} // namespace
void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) {
@@ -50,7 +151,7 @@
// Returns the program counter from signal context, nullptr if
// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc) {
+void* GetProgramCounter(void* const vuc) {
#ifdef __linux__
if (vuc != nullptr) {
ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
@@ -132,59 +233,17 @@
return nullptr;
}
-// The %p field width for printf() functions is two characters per byte,
-// and two extra for the leading "0x".
-static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
-
-// Print a program counter, its stack frame size, and its symbol name.
-// Note that there is a separate symbolize_pc argument. Return addresses may be
-// at the end of the function, and this allows the caller to back up from pc if
-// appropriate.
-static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc,
- void* symbolize_pc, int framesize,
- const char* const prefix) {
- char tmp[1024];
- const char* symbol = "(unknown)";
- if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
- symbol = tmp;
- }
- char buf[1024];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
- kPrintfPointerFieldWidth, pc, symbol);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize, symbol);
- }
- writerfn(buf, writerfn_arg);
-}
-
-// Print a program counter and the corresponding stack frame size.
-static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc, int framesize,
- const char* const prefix) {
- char buf[100];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
- kPrintfPointerFieldWidth, pc);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize);
- }
- writerfn(buf, writerfn_arg);
-}
-
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg) {
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg) {
if (pc != nullptr) {
// We don't know the stack frame size for PC, use 0.
if (symbolize_stacktrace) {
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+ DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: ");
}
}
for (int i = 0; i < depth; i++) {
@@ -194,22 +253,63 @@
// call to a function annotated noreturn (e.g. CHECK). Note that we don't
// do this for pc above, as the adjustment is only correct for return
// addresses.
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i],
reinterpret_cast<char*>(stack[i]) - 1,
frame_sizes[i], " ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
- " ");
+ DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], " ");
}
}
if (min_dropped_frames > 0) {
char buf[100];
snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
min_dropped_frames);
- writerfn(buf, writerfn_arg);
+ writer(buf, writer_arg);
}
}
+// Dump current stack trace as directed by writer.
+// Make sure this function is not inlined to avoid skipping too many top frames.
+ABSL_ATTRIBUTE_NOINLINE
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg) {
+ // Print stack trace
+ void* stack_buf[kDefaultDumpStackFramesLimit];
+ void** stack = stack_buf;
+ int num_stack = kDefaultDumpStackFramesLimit;
+ int allocated_bytes = 0;
+
+ if (num_stack >= max_num_frames) {
+ // User requested fewer frames than we already have space for.
+ num_stack = max_num_frames;
+ } else {
+ const size_t needed_bytes = max_num_frames * sizeof(stack[0]);
+ void* p = Allocate(needed_bytes);
+ if (p != nullptr) { // We got the space.
+ num_stack = max_num_frames;
+ stack = reinterpret_cast<void**>(p);
+ allocated_bytes = needed_bytes;
+ }
+ }
+
+ size_t depth = absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1);
+ for (size_t i = 0; i < depth; i++) {
+ if (symbolize_stacktrace) {
+ DumpPCAndSymbol(writer, writer_arg, stack[i], " ");
+ } else {
+ DumpPC(writer, writer_arg, stack[i], " ");
+ }
+ }
+
+ auto hook = GetDebugStackTraceHook();
+ if (hook != nullptr) {
+ (*hook)(stack, depth, writer, writer_arg);
+ }
+
+ if (allocated_bytes != 0) Deallocate(stack, allocated_bytes);
+}
+
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/debugging/internal/examine_stack.h b/absl/debugging/internal/examine_stack.h
index 61f0056..190af87 100644
--- a/absl/debugging/internal/examine_stack.h
+++ b/absl/debugging/internal/examine_stack.h
@@ -31,7 +31,7 @@
// `hook` that is called each time DumpStackTrace() is called.
// `hook` may be called from a signal handler.
typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
- OutputWriter writer, void* writer_arg);
+ OutputWriter* writer, void* writer_arg);
// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
// This is inherently unsafe and must be signal safe code.
@@ -41,14 +41,21 @@
// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc);
+void* GetProgramCounter(void* const vuc);
-// Uses `writerfn` to dump the program counter, stack trace, and stack
+// Uses `writer` to dump the program counter, stack trace, and stack
// frame sizes.
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg);
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg);
+
+// Dump current stack trace omitting the topmost `min_dropped_frames` stack
+// frames.
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg);
} // namespace debugging_internal
ABSL_NAMESPACE_END
diff --git a/absl/debugging/symbolize_test.cc b/absl/debugging/symbolize_test.cc
index a62fa35..3165c6e 100644
--- a/absl/debugging/symbolize_test.cc
+++ b/absl/debugging/symbolize_test.cc
@@ -483,7 +483,8 @@
}
}
-#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+ ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
// Test that we correctly identify bounds of Thumb functions on ARM.
//
// Thumb functions have the lowest-order bit set in their addresses in the ELF
@@ -502,6 +503,10 @@
// bit in the Thumb function's entry point. It will correctly compute the end of
// the Thumb function, it will find no overlap between the Thumb and ARM
// functions, and it will return the name of the ARM function.
+//
+// Unfortunately we cannot perform this test on armv6 or lower systems that use
+// the hard float ABI because gcc refuses to compile thumb functions on such
+// systems with a "sorry, unimplemented: Thumb-1 hard-float VFP ABI" error.
__attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) {
return x * x * x;
@@ -521,7 +526,8 @@
#endif
}
-#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && ((__ARM_ARCH >= 7)
+ // || !defined(__ARM_PCS_VFP))
#elif defined(_WIN32)
#if !defined(ABSL_CONSUME_DLL)
@@ -596,7 +602,8 @@
TestWithPCInsideInlineFunction();
TestWithPCInsideNonInlineFunction();
TestWithReturnAddress();
-#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+ ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
TestArmThumbOverlap();
#endif
#endif
diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h
index c7ad96b..7a899ee 100644
--- a/absl/numeric/int128.h
+++ b/absl/numeric/int128.h
@@ -44,7 +44,7 @@
// builtin type. We need to make sure not to define operator wchar_t()
// alongside operator unsigned short() in these instances.
#define ABSL_INTERNAL_WCHAR_T __wchar_t
-#if defined(_M_X64)
+#if defined(_M_X64) && !defined(_M_ARM64EC)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif // defined(_M_X64)
@@ -980,7 +980,7 @@
// can be used for uint128 storage.
return static_cast<unsigned __int128>(lhs) *
static_cast<unsigned __int128>(rhs);
-#elif defined(_MSC_VER) && defined(_M_X64)
+#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
uint64_t carry;
uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry);
return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) +
diff --git a/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
index 0226ab7..134d143 100644
--- a/absl/time/internal/cctz/src/time_zone_lookup_test.cc
+++ b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
@@ -1182,6 +1182,44 @@
// We have a transition but we don't know which one.
}
+TEST(NextTransition, Scan) {
+ for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
+ time_zone tz;
+ if (!load_time_zone(*np, &tz)) {
+ continue; // tolerate kTimeZoneNames/zoneinfo skew
+ }
+ SCOPED_TRACE(testing::Message() << "In " << *np);
+
+ auto tp = time_point<absl::time_internal::cctz::seconds>::min();
+ time_zone::civil_transition trans;
+ while (tz.next_transition(tp, &trans)) {
+ time_zone::civil_lookup from_cl = tz.lookup(trans.from);
+ EXPECT_NE(from_cl.kind, time_zone::civil_lookup::REPEATED);
+ time_zone::civil_lookup to_cl = tz.lookup(trans.to);
+ EXPECT_NE(to_cl.kind, time_zone::civil_lookup::SKIPPED);
+
+ auto trans_tp = to_cl.trans;
+ time_zone::absolute_lookup trans_al = tz.lookup(trans_tp);
+ EXPECT_EQ(trans_al.cs, trans.to);
+ auto pre_trans_tp = trans_tp - absl::time_internal::cctz::seconds(1);
+ time_zone::absolute_lookup pre_trans_al = tz.lookup(pre_trans_tp);
+ EXPECT_EQ(pre_trans_al.cs + 1, trans.from);
+
+ auto offset_delta = trans_al.offset - pre_trans_al.offset;
+ EXPECT_EQ(offset_delta, trans.to - trans.from);
+ if (offset_delta == 0) {
+ // This "transition" is only an is_dst or abbr change.
+ EXPECT_EQ(to_cl.kind, time_zone::civil_lookup::UNIQUE);
+ if (trans_al.is_dst == pre_trans_al.is_dst) {
+ EXPECT_STRNE(trans_al.abbr, pre_trans_al.abbr);
+ }
+ }
+
+ tp = trans_tp; // continue scan from transition
+ }
+ }
+}
+
TEST(TimeZoneEdgeCase, AmericaNewYork) {
const time_zone tz = LoadZone("America/New_York");
diff --git a/absl/time/internal/cctz/testdata/README.zoneinfo b/absl/time/internal/cctz/testdata/README.zoneinfo
index 95fb4a9..a41c7b8 100644
--- a/absl/time/internal/cctz/testdata/README.zoneinfo
+++ b/absl/time/internal/cctz/testdata/README.zoneinfo
@@ -13,7 +13,12 @@
trap "rm -fr ${DESTDIR}" 0 2 15
(
cd ${DESTDIR}
- git clone https://github.com/eggert/tz.git
+ if [ -n "${USE_GLOBAL_TZ}" ]
+ then
+ git clone -b global-tz https://github.com/JodaOrg/global-tz.git tz
+ else
+ git clone https://github.com/eggert/tz.git
+ fi
make --directory=tz \
install DESTDIR=${DESTDIR} \
DATAFORM=vanguard \
diff --git a/absl/time/internal/cctz/testdata/version b/absl/time/internal/cctz/testdata/version
index 8ee898b..ca002de 100644
--- a/absl/time/internal/cctz/testdata/version
+++ b/absl/time/internal/cctz/testdata/version
@@ -1 +1 @@
-2021e
+2022a
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas b/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
index 5c9a20b..c042104 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago b/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
index 8d60322..cde8dbb 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
index ccc57c9..effc4df 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
index 906d8d5..aa52bd2 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental b/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
index 8d60322..cde8dbb 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
index 8f83cef..4e02685 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
index 88a6f3b..40d23c0 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
index a575568..d4c3591 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
index 4ea8dae..71819a5 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
Binary files differ
diff --git a/symbols_arm64_dbg.def b/symbols_arm64_dbg.def
index 4ddee91..9a3bfb0 100644
--- a/symbols_arm64_dbg.def
+++ b/symbols_arm64_dbg.def
@@ -1635,7 +1635,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@_NAEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPEAXQEBQEAXQEAHHH_NP6AXPEBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQEAXQEBQEAXQEAHHH_NP6AXPEBDPEAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPEBDPEAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?Edge@CordRepBtree@cord_internal@absl@@QEBAPEAUCordRep@23@W4EdgeType@123@@Z
@@ -1831,7 +1832,7 @@
?GetPayloads@Status@absl@@AEAAPEAV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPayloads@Status@absl@@AEBAPEBV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QEAA?AV?$Span@D@3@_K@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPEAXPEAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPEAXQEAX@Z
?GetRepData@CordRepRing@cord_internal@absl@@SAPEBDPEBUCordRep@23@@Z
?GetRepHi@time_internal@absl@@YA_JVDuration@2@@Z
?GetRepLo@time_internal@absl@@YAIVDuration@2@@Z
@@ -1874,7 +1875,6 @@
?HasEdge@GraphCycles@synchronization_internal@absl@@QEBA_NUGraphId@23@0@Z
?HasNode@GraphCycles@synchronization_internal@absl@@QEAA_NUGraphId@23@@Z
?HasRandenHwAesImplementation@random_internal@absl@@YA_NXZ
- ?HashSeed@container_internal@absl@@YA_KPEBW4ctrl_t@12@@Z
?Head@CordzInfo@cord_internal@absl@@SAPEAV123@AEBVCordzSnapshot@23@@Z
?HexStringToBytes@absl@@YA?AV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@Vstring_view@1@@Z
?HideMask@base_internal@absl@@YA_KXZ
@@ -2076,6 +2076,7 @@
?ParsePosixSpec@cctz@time_internal@absl@@YA_NAEBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@PEAUPosixTimeZone@123@@Z
?ParseTime@absl@@YA_NVstring_view@1@0PEAVTime@1@PEAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
?ParseTime@absl@@YA_NVstring_view@1@0VTimeZone@1@PEAVTime@1@PEAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
+ ?PerTableSalt@container_internal@absl@@YA_KPEBW4ctrl_t@12@@Z
?PermissionDeniedError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?Piece@AlphaNum@absl@@QEBA?AVstring_view@2@XZ
?PiecewiseChunkSize@hash_internal@absl@@YA_KXZ
diff --git a/symbols_arm64_rel.def b/symbols_arm64_rel.def
index 04fd94a..5f92eee 100644
--- a/symbols_arm64_rel.def
+++ b/symbols_arm64_rel.def
@@ -356,7 +356,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@_NAEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPEAXQEBQEAXQEAHHH_NP6AXPEBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQEAXQEBQEAXQEAHHH_NP6AXPEBDPEAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPEBDPEAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?EmptyString@Status@absl@@CAPEBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@XZ
@@ -481,7 +482,7 @@
?GetParentStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
?GetPayload@Status@absl@@QEBA?AV?$optional@VCord@absl@@@2@Vstring_view@2@@Z
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QEAA?AV?$Span@D@3@_K@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPEAXPEAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPEAXQEAX@Z
?GetSaltMaterial@random_internal@absl@@YA?AV?$optional@I@2@XZ
?GetSkipCount@ExponentialBiased@profiling_internal@absl@@QEAA_J_J@Z
?GetStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
diff --git a/symbols_x64_dbg.def b/symbols_x64_dbg.def
index 143d703..25dd1ef 100644
--- a/symbols_x64_dbg.def
+++ b/symbols_x64_dbg.def
@@ -1638,7 +1638,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@_NAEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPEAXQEBQEAXQEAHHH_NP6AXPEBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQEAXQEBQEAXQEAHHH_NP6AXPEBDPEAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPEBDPEAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?Edge@CordRepBtree@cord_internal@absl@@QEBAPEAUCordRep@23@W4EdgeType@123@@Z
@@ -1833,7 +1834,7 @@
?GetPayloads@Status@absl@@AEAAPEAV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPayloads@Status@absl@@AEBAPEBV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QEAA?AV?$Span@D@3@_K@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPEAXPEAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPEAXQEAX@Z
?GetRepData@CordRepRing@cord_internal@absl@@SAPEBDPEBUCordRep@23@@Z
?GetRepHi@time_internal@absl@@YA_JVDuration@2@@Z
?GetRepLo@time_internal@absl@@YAIVDuration@2@@Z
@@ -1876,7 +1877,6 @@
?HasEdge@GraphCycles@synchronization_internal@absl@@QEBA_NUGraphId@23@0@Z
?HasNode@GraphCycles@synchronization_internal@absl@@QEAA_NUGraphId@23@@Z
?HasRandenHwAesImplementation@random_internal@absl@@YA_NXZ
- ?HashSeed@container_internal@absl@@YA_KPEBW4ctrl_t@12@@Z
?Head@CordzInfo@cord_internal@absl@@SAPEAV123@AEBVCordzSnapshot@23@@Z
?HexStringToBytes@absl@@YA?AV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@Vstring_view@1@@Z
?HideMask@base_internal@absl@@YA_KXZ
@@ -2077,6 +2077,7 @@
?ParsePosixSpec@cctz@time_internal@absl@@YA_NAEBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@PEAUPosixTimeZone@123@@Z
?ParseTime@absl@@YA_NVstring_view@1@0PEAVTime@1@PEAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
?ParseTime@absl@@YA_NVstring_view@1@0VTimeZone@1@PEAVTime@1@PEAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
+ ?PerTableSalt@container_internal@absl@@YA_KPEBW4ctrl_t@12@@Z
?PermissionDeniedError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?Piece@AlphaNum@absl@@QEBA?AVstring_view@2@XZ
?PiecewiseChunkSize@hash_internal@absl@@YA_KXZ
diff --git a/symbols_x64_rel.def b/symbols_x64_rel.def
index cf4155b..750761d 100644
--- a/symbols_x64_rel.def
+++ b/symbols_x64_rel.def
@@ -357,7 +357,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@_NAEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPEAXQEBQEAXQEAHHH_NP6AXPEBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQEAXQEBQEAXQEAHHH_NP6AXPEBDPEAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPEBDPEAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?EmptyString@Status@absl@@CAPEBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@XZ
@@ -481,7 +482,7 @@
?GetParentStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
?GetPayload@Status@absl@@QEBA?AV?$optional@VCord@absl@@@2@Vstring_view@2@@Z
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QEAA?AV?$Span@D@3@_K@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPEAXPEAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPEAXQEAX@Z
?GetSaltMaterial@random_internal@absl@@YA?AV?$optional@I@2@XZ
?GetSkipCount@ExponentialBiased@profiling_internal@absl@@QEAA_J_J@Z
?GetStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
diff --git a/symbols_x64_rel_asan.def b/symbols_x64_rel_asan.def
index 8e99dda..14daa87 100644
--- a/symbols_x64_rel_asan.def
+++ b/symbols_x64_rel_asan.def
@@ -360,7 +360,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@AEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPEBUCordRep@23@Vstring_view@3@_NAEAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPEAXQEBQEAXQEAHHH_NP6AXPEBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQEAXQEBQEAXQEAHHH_NP6AXPEBDPEAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPEBDPEAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?EmptyString@Status@absl@@CAPEBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@XZ
@@ -485,7 +486,7 @@
?GetParentStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
?GetPayload@Status@absl@@QEBA?AV?$optional@VCord@absl@@@2@Vstring_view@2@@Z
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QEAA?AV?$Span@D@3@_K@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPEAXPEAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPEAXQEAX@Z
?GetSaltMaterial@random_internal@absl@@YA?AV?$optional@I@2@XZ
?GetSkipCount@ExponentialBiased@profiling_internal@absl@@QEAA_J_J@Z
?GetStack@CordzInfo@cord_internal@absl@@QEBA?AV?$Span@QEAX@3@XZ
diff --git a/symbols_x86_dbg.def b/symbols_x86_dbg.def
index 1e080e5..6e6b741 100644
--- a/symbols_x86_dbg.def
+++ b/symbols_x86_dbg.def
@@ -1633,7 +1633,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@AAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@Vstring_view@3@AAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@Vstring_view@3@_NAAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPAXQBQAXQAHHH_NP6AXPBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQAXQBQAXQAHHH_NP6AXPBDPAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPBDPAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?Edge@CordRepBtree@cord_internal@absl@@QBEPAUCordRep@23@I@Z
@@ -1828,7 +1829,7 @@
?GetPayloads@Status@absl@@AAEPAV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPayloads@Status@absl@@ABEPBV?$InlinedVector@UPayload@status_internal@absl@@$00V?$allocator@UPayload@status_internal@absl@@@__1@std@@@2@XZ
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QAE?AV?$Span@D@3@I@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPAXPAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPAXQAX@Z
?GetRepData@CordRepRing@cord_internal@absl@@SAPBDPBUCordRep@23@@Z
?GetRepHi@time_internal@absl@@YA_JVDuration@2@@Z
?GetRepLo@time_internal@absl@@YAIVDuration@2@@Z
@@ -1871,7 +1872,6 @@
?HasEdge@GraphCycles@synchronization_internal@absl@@QBE_NUGraphId@23@0@Z
?HasNode@GraphCycles@synchronization_internal@absl@@QAE_NUGraphId@23@@Z
?HasRandenHwAesImplementation@random_internal@absl@@YA_NXZ
- ?HashSeed@container_internal@absl@@YAIPBW4ctrl_t@12@@Z
?Head@CordzInfo@cord_internal@absl@@SAPAV123@ABVCordzSnapshot@23@@Z
?HexStringToBytes@absl@@YA?AV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@Vstring_view@1@@Z
?HideMask@base_internal@absl@@YAIXZ
@@ -2072,6 +2072,7 @@
?ParsePosixSpec@cctz@time_internal@absl@@YA_NABV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@PAUPosixTimeZone@123@@Z
?ParseTime@absl@@YA_NVstring_view@1@0PAVTime@1@PAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
?ParseTime@absl@@YA_NVstring_view@1@0VTimeZone@1@PAVTime@1@PAV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@@Z
+ ?PerTableSalt@container_internal@absl@@YAIPBW4ctrl_t@12@@Z
?PermissionDeniedError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?Piece@AlphaNum@absl@@QBE?AVstring_view@2@XZ
?PiecewiseChunkSize@hash_internal@absl@@YAIXZ
diff --git a/symbols_x86_rel.def b/symbols_x86_rel.def
index 04efb8a..9440a63 100644
--- a/symbols_x86_rel.def
+++ b/symbols_x86_rel.def
@@ -353,7 +353,8 @@
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@AAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@Vstring_view@3@AAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
?Dump@CordRepBtree@cord_internal@absl@@SAXPBUCordRep@23@Vstring_view@3@_NAAV?$basic_ostream@DU?$char_traits@D@__1@std@@@__1@std@@@Z
- ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXPAXQBQAXQAHHH_NP6AXPBD0@Z0@Z
+ ?DumpPCAndFrameSizesAndStackTrace@debugging_internal@absl@@YAXQAXQBQAXQAHHH_NP6AXPBDPAX@Z5@Z
+ ?DumpStackTrace@debugging_internal@absl@@YAXHH_NP6AXPBDPAX@Z2@Z
?DurationFromTimespec@absl@@YA?AVDuration@1@Utimespec@@@Z
?DurationFromTimeval@absl@@YA?AVDuration@1@Utimeval@@@Z
?EmptyString@Status@absl@@CAPBV?$basic_string@DU?$char_traits@D@__1@std@@V?$allocator@D@23@@__1@std@@XZ
@@ -477,7 +478,7 @@
?GetParentStack@CordzInfo@cord_internal@absl@@QBE?AV?$Span@QAX@3@XZ
?GetPayload@Status@absl@@QBE?AV?$optional@VCord@absl@@@2@Vstring_view@2@@Z
?GetPrependBuffer@CordRepRing@cord_internal@absl@@QAE?AV?$Span@D@3@I@Z
- ?GetProgramCounter@debugging_internal@absl@@YAPAXPAX@Z
+ ?GetProgramCounter@debugging_internal@absl@@YAPAXQAX@Z
?GetSaltMaterial@random_internal@absl@@YA?AV?$optional@I@2@XZ
?GetSkipCount@ExponentialBiased@profiling_internal@absl@@QAE_J_J@Z
?GetStack@CordzInfo@cord_internal@absl@@QBE?AV?$Span@QAX@3@XZ