Reland "Roll abseil_revision 647ed70eff..6d8e1a5cf0"
This is a reland of commit 33b41dcfb9fa5de4e6cd779b1b1286fe22d387b9
Original change's description:
> Roll abseil_revision 647ed70eff..6d8e1a5cf0
>
> Change Log:
> https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+log/647ed70eff..6d8e1a5cf0
> Full diff:
> https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+/647ed70eff..6d8e1a5cf0
>
> Bug: 475029685
> Change-Id: I26c36c88d4561997bb0ac933bbcff5729d2a0915
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/7367126
> Reviewed-by: Danil Chapovalov <danilchap@chromium.org>
> Commit-Queue: Mirko Bonadei <mbonadei@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#1573094}
Bug: 475029685
Change-Id: Idb31bd800c788e4b7b408581a2595b4ec0844255
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/7511430
Reviewed-by: Danil Chapovalov <danilchap@chromium.org>
Commit-Queue: Mirko Bonadei <mbonadei@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1573591}
NOKEYCHECK=True
GitOrigin-RevId: 78dc40817ce19ac01b50e56eb672ebc0386961bb
diff --git a/BUILD.gn b/BUILD.gn
index a2c0501..c14f530 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -273,6 +273,7 @@
"absl/base:tracing_internal_strong_test",
"absl/base:tracing_internal_weak_test",
"absl/cleanup:cleanup_test",
+ "absl/container:chunked_queue_test",
"absl/container:common_policy_traits_test",
"absl/container:fixed_array_test",
"absl/container:flat_hash_map_test",
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index 02907c5..74c9b6d 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -6,6 +6,7 @@
"algorithm/container.h"
"base/attributes.h"
"base/call_once.h"
+ "base/casts.cc"
"base/casts.h"
"base/config.h"
"base/const_init.h"
@@ -67,6 +68,7 @@
"cleanup/internal/cleanup.h"
"container/btree_map.h"
"container/btree_set.h"
+ "container/chunked_queue.h"
"container/hash_container_defaults.h"
"container/fixed_array.h"
"container/flat_hash_map.h"
@@ -74,6 +76,7 @@
"container/inlined_vector.h"
"container/internal/btree.h"
"container/internal/btree_container.h"
+ "container/internal/chunked_queue.h"
"container/internal/common.h"
"container/internal/common_policy_traits.h"
"container/internal/compressed_tuple.h"
diff --git a/README.chromium b/README.chromium
index 13d44aa..1697058 100644
--- a/README.chromium
+++ b/README.chromium
@@ -4,7 +4,7 @@
License: Apache-2.0
License File: LICENSE
Version: N/A
-Revision: 647ed70eff043c28436c047906e2d7281b55ff27
+Revision: 6d8e1a5cf00d6a6b26069b0baf0dd9b855a17459
Update Mechanism: Manual
Security Critical: yes
Shipped: yes
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 2214ebf..9ffa7ac 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -254,6 +254,7 @@
cc_library(
name = "base",
srcs = [
+ "casts.cc",
"internal/cycleclock.cc",
"internal/spinlock.cc",
"internal/sysinfo.cc",
@@ -293,8 +294,6 @@
":config",
":core_headers",
":cycleclock_internal",
- ":dynamic_annotations",
- ":log_severity",
":nullability",
":raw_logging_internal",
":spinlock_wait",
@@ -609,7 +608,7 @@
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
- ":core_headers",
+ ":config",
"@googletest//:gtest",
"@googletest//:gtest_main",
],
diff --git a/absl/base/BUILD.gn b/absl/base/BUILD.gn
index c82e827..a9b6027 100644
--- a/absl/base/BUILD.gn
+++ b/absl/base/BUILD.gn
@@ -160,6 +160,7 @@
absl_source_set("base") {
sources = [
+ "casts.cc",
"internal/cycleclock.cc",
"internal/spinlock.cc",
"internal/sysinfo.cc",
@@ -188,8 +189,6 @@
":config",
":core_headers",
":cycleclock_internal",
- ":dynamic_annotations",
- ":log_severity",
":nullability",
":raw_logging_internal",
":spinlock_wait",
@@ -381,7 +380,7 @@
sources = [ "casts_test.cc" ]
deps = [
":base",
- ":core_headers",
+ ":config",
]
}
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 9e8f3e5..95c01cf 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -261,6 +261,7 @@
"internal/unscaledcycleclock.h"
"internal/unscaledcycleclock_config.h"
SRCS
+ "casts.cc"
"internal/cycleclock.cc"
"internal/spinlock.cc"
"internal/sysinfo.cc"
diff --git a/absl/base/casts.cc b/absl/base/casts.cc
new file mode 100644
index 0000000..d864a8c
--- /dev/null
+++ b/absl/base/casts.cc
@@ -0,0 +1,61 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/casts.h"
+
+#include <cstdlib>
+#include <string>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace base_internal {
+
+namespace {
+
+std::string DemangleCppString(const char* mangled) {
+ std::string out;
+ int status = 0;
+ char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+ demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
+#endif
+ if (status == 0 && demangled != nullptr) {
+ out.append(demangled);
+ free(demangled);
+ } else {
+ out.append(mangled);
+ }
+ return out;
+}
+
+} // namespace
+
+void BadDownCastCrash(const char* source_type, const char* target_type) {
+ ABSL_RAW_LOG(FATAL, "down cast from %s to %s failed",
+ DemangleCppString(source_type).c_str(),
+ DemangleCppString(target_type).c_str());
+}
+
+} // namespace base_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/base/casts.h b/absl/base/casts.h
index 35f5f93..5501e5c 100644
--- a/absl/base/casts.h
+++ b/absl/base/casts.h
@@ -27,6 +27,7 @@
#include <cstring>
#include <memory>
#include <type_traits>
+#include <typeinfo>
#include <utility>
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
@@ -34,7 +35,10 @@
#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/options.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -191,6 +195,112 @@
#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+namespace base_internal {
+
+[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void BadDownCastCrash(
+ const char* source_type, const char* target_type);
+
+template <typename To, typename From>
+inline void ValidateDownCast(From* f ABSL_ATTRIBUTE_UNUSED) {
+ // Assert only if RTTI is enabled and in debug mode or hardened asserts are
+ // enabled.
+#ifdef ABSL_INTERNAL_HAS_RTTI
+#if !defined(NDEBUG) || (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2)
+ // Suppress erroneous nonnull comparison warning on older GCC.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnonnull-compare"
+#endif
+ if (ABSL_PREDICT_FALSE(f != nullptr && dynamic_cast<To>(f) == nullptr)) {
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+ absl::base_internal::BadDownCastCrash(
+ typeid(*f).name(), typeid(std::remove_pointer_t<To>).name());
+ }
+#endif
+#endif
+}
+
+} // namespace base_internal
+
+// An "upcast", i.e. a conversion from a pointer to an object to a pointer to a
+// base subobject, always succeeds if the base is unambiguous and accessible,
+// and so it's fine to use implicit_cast.
+//
+// A "downcast", i.e. a conversion from a pointer to an object to a pointer
+// to a more-derived object that may contain the original object as a base
+// subobject, cannot safely be done using static_cast, because you do not
+// generally know whether the source object is really the base subobject of
+// a containing, more-derived object of the target type. Thus, when you
+// downcast in a polymorphic type hierarchy, you should use the following
+// function template.
+//
+// This function only returns null when the input is null. In debug mode (and
+// in hardened builds), we use dynamic_cast to double-check whether the
+// downcast is legal (we die if it's not). Otherwise, we do the efficient
+// static_cast instead. Because the process will die when the check fails, it's
+// important to test that the cast is legal before calling this function!
+//
+// dynamic_cast should be avoided except as allowed by the style guide
+// (https://google.github.io/styleguide/cppguide.html#Run-Time_Type_Information__RTTI_).
+
+template <typename To, typename From> // use like this: down_cast<T*>(foo);
+[[nodiscard]]
+inline To down_cast(From* f) { // so we only accept pointers
+ static_assert(std::is_pointer<To>::value, "target type not a pointer");
+ // dynamic_cast allows casting to the same type or a more cv-qualified
+ // version of the same type without them being polymorphic.
+ if constexpr (!std::is_same<std::remove_cv_t<std::remove_pointer_t<To>>,
+ std::remove_cv_t<From>>::value) {
+ static_assert(std::is_polymorphic<From>::value,
+ "source type must be polymorphic");
+ static_assert(std::is_polymorphic<std::remove_pointer_t<To>>::value,
+ "target type must be polymorphic");
+ }
+ static_assert(
+ std::is_convertible<std::remove_cv_t<std::remove_pointer_t<To>>*,
+ std::remove_cv_t<From>*>::value,
+ "target type not derived from source type");
+
+ absl::base_internal::ValidateDownCast<To>(f);
+
+ return static_cast<To>(f);
+}
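+
+// A minimal usage sketch (hypothetical `Animal`/`Cat` types; assumes `Animal`
+// is a polymorphic base of `Cat`):
+//
+//   Animal* animal = new Cat;
+//   Cat* cat = absl::down_cast<Cat*>(animal);
+//
+// In debug and hardened builds a bad downcast crashes via BadDownCastCrash
+// rather than returning null.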
+
+// Overload of down_cast for references. Use like this:
+// absl::down_cast<T&>(foo). The code is slightly convoluted because we're still
+// using the pointer form of dynamic_cast. (The reference form throws an
+// exception if it fails.)
+//
+// There's no need for a special const overload either for the pointer
+// or the reference form. If you call down_cast with a const T&, the
+// compiler will just bind From to const T.
+template <typename To, typename From>
+[[nodiscard]]
+inline To down_cast(From& f) {
+ static_assert(std::is_lvalue_reference<To>::value,
+ "target type not a reference");
+ // dynamic_cast allows casting to the same type or a more cv-qualified
+ // version of the same type without them being polymorphic.
+ if constexpr (!std::is_same<std::remove_cv_t<std::remove_reference_t<To>>,
+ std::remove_cv_t<From>>::value) {
+ static_assert(std::is_polymorphic<From>::value,
+ "source type must be polymorphic");
+ static_assert(std::is_polymorphic<std::remove_reference_t<To>>::value,
+ "target type must be polymorphic");
+ }
+ static_assert(
+ std::is_convertible<std::remove_cv_t<std::remove_reference_t<To>>*,
+ std::remove_cv_t<From>*>::value,
+ "target type not derived from source type");
+
+ absl::base_internal::ValidateDownCast<std::remove_reference_t<To>*>(
+ std::addressof(f));
+
+ return static_cast<To>(f);
+}
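+
+// A corresponding sketch for the reference form (same hypothetical types):
+//
+//   Cat cat;
+//   Animal& base = cat;
+//   Cat& derived = absl::down_cast<Cat&>(base);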
+
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/base/casts_test.cc b/absl/base/casts_test.cc
index 25f92cc..772225e 100644
--- a/absl/base/casts_test.cc
+++ b/absl/base/casts_test.cc
@@ -18,40 +18,134 @@
#include <utility>
#include "gtest/gtest.h"
+#include "absl/base/options.h"
namespace {
-struct Base {
- explicit Base(int value) : x(value) {}
- Base(const Base& other) = delete;
- Base& operator=(const Base& other) = delete;
+struct BaseForImplicitCast {
+ explicit BaseForImplicitCast(int value) : x(value) {}
+ BaseForImplicitCast(const BaseForImplicitCast& other) = delete;
+ BaseForImplicitCast& operator=(const BaseForImplicitCast& other) = delete;
int x;
};
-struct Derived : Base {
- explicit Derived(int value) : Base(value) {}
+struct DerivedForImplicitCast : BaseForImplicitCast {
+ explicit DerivedForImplicitCast(int value) : BaseForImplicitCast(value) {}
};
+static_assert(std::is_same_v<decltype(absl::implicit_cast<BaseForImplicitCast&>(
+ std::declval<DerivedForImplicitCast&>())),
+ BaseForImplicitCast&>);
static_assert(
- std::is_same_v<
- decltype(absl::implicit_cast<Base&>(std::declval<Derived&>())), Base&>);
-static_assert(std::is_same_v<decltype(absl::implicit_cast<const Base&>(
- std::declval<Derived>())),
- const Base&>);
+ std::is_same_v<decltype(absl::implicit_cast<const BaseForImplicitCast&>(
+ std::declval<DerivedForImplicitCast>())),
+ const BaseForImplicitCast&>);
TEST(ImplicitCastTest, LValueReference) {
- Derived derived(5);
- EXPECT_EQ(&absl::implicit_cast<Base&>(derived), &derived);
- EXPECT_EQ(&absl::implicit_cast<const Base&>(derived), &derived);
+ DerivedForImplicitCast derived(5);
+ EXPECT_EQ(&absl::implicit_cast<BaseForImplicitCast&>(derived), &derived);
+ EXPECT_EQ(&absl::implicit_cast<const BaseForImplicitCast&>(derived),
+ &derived);
}
TEST(ImplicitCastTest, RValueReference) {
- Derived derived(5);
- Base&& base = absl::implicit_cast<Base&&>(std::move(derived));
+ DerivedForImplicitCast derived(5);
+ BaseForImplicitCast&& base =
+ absl::implicit_cast<BaseForImplicitCast&&>(std::move(derived));
EXPECT_EQ(&base, &derived);
- const Derived cderived(6);
- const Base&& cbase = absl::implicit_cast<const Base&&>(std::move(cderived));
+ const DerivedForImplicitCast cderived(6);
+ const BaseForImplicitCast&& cbase =
+ absl::implicit_cast<const BaseForImplicitCast&&>(std::move(cderived));
EXPECT_EQ(&cbase, &cderived);
}
+class BaseForDownCast {
+ public:
+ virtual ~BaseForDownCast() = default;
+};
+
+class DerivedForDownCast : public BaseForDownCast {};
+class Derived2ForDownCast : public BaseForDownCast {};
+
+TEST(DownCastTest, Pointer) {
+ DerivedForDownCast derived;
+ BaseForDownCast* const base_ptr = &derived;
+
+ // Tests casting a BaseForDownCast* to a DerivedForDownCast*.
+ EXPECT_EQ(&derived, absl::down_cast<DerivedForDownCast*>(base_ptr));
+
+ // Tests casting a const BaseForDownCast* to a const DerivedForDownCast*.
+ const BaseForDownCast* const_base_ptr = base_ptr;
+ EXPECT_EQ(&derived,
+ absl::down_cast<const DerivedForDownCast*>(const_base_ptr));
+
+ // Tests casting a BaseForDownCast* to a const DerivedForDownCast*.
+ EXPECT_EQ(&derived, absl::down_cast<const DerivedForDownCast*>(base_ptr));
+
+ // Tests casting a BaseForDownCast* to a BaseForDownCast* (an identity cast).
+ EXPECT_EQ(base_ptr, absl::down_cast<BaseForDownCast*>(base_ptr));
+
+  // Tests down casting nullptr.
+ EXPECT_EQ(nullptr,
+ (absl::down_cast<DerivedForDownCast*, BaseForDownCast>(nullptr)));
+
+ // Tests a bad downcast. We have to disguise the badness just enough
+ // that the compiler doesn't warn about it at compile time.
+ BaseForDownCast* base2 = new BaseForDownCast();
+#if GTEST_HAS_DEATH_TEST && (!defined(NDEBUG) || (ABSL_OPTION_HARDENED == 1 || \
+ ABSL_OPTION_HARDENED == 2))
+ EXPECT_DEATH(static_cast<void>(absl::down_cast<DerivedForDownCast*>(base2)),
+ ".*down cast from .*BaseForDownCast.* to "
+ ".*DerivedForDownCast.* failed.*");
+#endif
+ delete base2;
+}
+
+TEST(DownCastTest, Reference) {
+ DerivedForDownCast derived;
+ BaseForDownCast& base_ref = derived;
+
+ // Tests casting a BaseForDownCast& to a DerivedForDownCast&.
+ // NOLINTNEXTLINE(runtime/casting)
+ EXPECT_EQ(&derived, &absl::down_cast<DerivedForDownCast&>(base_ref));
+
+ // Tests casting a const BaseForDownCast& to a const DerivedForDownCast&.
+ const BaseForDownCast& const_base_ref = base_ref;
+ // NOLINTNEXTLINE(runtime/casting)
+ EXPECT_EQ(&derived,
+ &absl::down_cast<const DerivedForDownCast&>(const_base_ref));
+
+ // Tests casting a BaseForDownCast& to a const DerivedForDownCast&.
+ // NOLINTNEXTLINE(runtime/casting)
+ EXPECT_EQ(&derived, &absl::down_cast<const DerivedForDownCast&>(base_ref));
+
+ // Tests casting a BaseForDownCast& to a BaseForDownCast& (an identity cast).
+ // NOLINTNEXTLINE(runtime/casting)
+ EXPECT_EQ(&base_ref, &absl::down_cast<BaseForDownCast&>(base_ref));
+
+ // Tests a bad downcast. We have to disguise the badness just enough
+ // that the compiler doesn't warn about it at compile time.
+ BaseForDownCast& base2 = *new BaseForDownCast();
+#if GTEST_HAS_DEATH_TEST && (!defined(NDEBUG) || (ABSL_OPTION_HARDENED == 1 || \
+ ABSL_OPTION_HARDENED == 2))
+ EXPECT_DEATH(static_cast<void>(absl::down_cast<DerivedForDownCast&>(base2)),
+ ".*down cast from .*BaseForDownCast.* to "
+ ".*DerivedForDownCast.* failed.*");
+#endif
+ delete &base2;
+}
+
+TEST(DownCastTest, ErrorMessage) {
+ DerivedForDownCast derived;
+ BaseForDownCast& base = derived;
+ (void)base;
+
+#if GTEST_HAS_DEATH_TEST && (!defined(NDEBUG) || (ABSL_OPTION_HARDENED == 1 || \
+ ABSL_OPTION_HARDENED == 2))
+ EXPECT_DEATH(static_cast<void>(absl::down_cast<Derived2ForDownCast&>(base)),
+ ".*down cast from .*DerivedForDownCast.* to "
+ ".*Derived2ForDownCast.* failed.*");
+#endif
+}
+
} // namespace
diff --git a/absl/cleanup/cleanup.h b/absl/cleanup/cleanup.h
index 311e482..632ec6e 100644
--- a/absl/cleanup/cleanup.h
+++ b/absl/cleanup/cleanup.h
@@ -19,6 +19,10 @@
// `absl::Cleanup` implements the scope guard idiom, invoking the contained
// callback's `operator()() &&` on scope exit.
//
+// This class doesn't allocate or take any locks, and is safe to use in a signal
+// handler. Of course, the callback with which it is constructed must also be
+// signal-safe for this to be useful.
+//
// Example:
//
// ```
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 15fb0a5..e90aaec 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -1349,3 +1349,46 @@
"@google_benchmark//:benchmark_main",
],
)
+
+cc_library(
+ name = "chunked_queue",
+ srcs = ["internal/chunked_queue.h"],
+ hdrs = ["chunked_queue.h"],
+ deps = [
+ ":layout",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/base:iterator_traits_internal",
+ ],
+)
+
+cc_test(
+ name = "chunked_queue_test",
+ size = "small",
+ srcs = ["chunked_queue_test.cc"],
+ deps = [
+ ":chunked_queue",
+ ":test_allocator",
+ "//absl/base:core_headers",
+ "//absl/strings",
+ "@googletest//:gtest",
+ "@googletest//:gtest_main",
+ ],
+)
+
+cc_binary(
+ name = "chunked_queue_benchmark",
+ testonly = True,
+ srcs = ["chunked_queue_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":chunked_queue",
+ "//absl/random",
+ "//absl/status",
+ "//absl/strings:cord",
+ "@google_benchmark//:benchmark_main",
+ ],
+)
diff --git a/absl/container/BUILD.gn b/absl/container/BUILD.gn
index 3ccf6aa..22a4ca8 100644
--- a/absl/container/BUILD.gn
+++ b/absl/container/BUILD.gn
@@ -720,4 +720,27 @@
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
+
# absl_test("linked_hash_map_benchmark")
+
+absl_source_set("chunked_queue") {
+ sources = [ "internal/chunked_queue.h" ]
+ public = [ "chunked_queue.h" ]
+ deps = [
+ ":layout",
+ "//third_party/abseil-cpp/absl/base:config",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/base:iterator_traits_internal",
+ ]
+}
+
+absl_test("chunked_queue_test") {
+ sources = [ "chunked_queue_test.cc" ]
+ deps = [
+ ":chunked_queue",
+ ":test_allocator",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings",
+ ]
+}
+# absl_test("chunked_queue_benchmark")
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index f1ea9e2..365c6ea 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -1202,3 +1202,38 @@
absl::unordered_set_modifiers_test
GTest::gmock_main
)
+
+absl_cc_library(
+ NAME
+ chunked_queue
+ HDRS
+ "chunked_queue.h"
+ "internal/chunked_queue.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::config
+ absl::core_headers
+ absl::iterator_traits_internal
+ absl::layout
+)
+
+absl_cc_test(
+ NAME
+ chunked_queue_test
+ SRCS
+ "chunked_queue_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::chunked_queue
+ absl::config
+ absl::core_headers
+ absl::strings
+ absl::test_allocator
+ GTest::gmock_main
+)
diff --git a/absl/container/chunked_queue.h b/absl/container/chunked_queue.h
new file mode 100644
index 0000000..d5b1184
--- /dev/null
+++ b/absl/container/chunked_queue.h
@@ -0,0 +1,755 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: chunked_queue.h
+// -----------------------------------------------------------------------------
+//
+// `std::deque` provides random access and fast push/pop at the back and
+// front. It is implemented as an array of fixed-size blocks. It provides no
+// control over block size, and implementations differ: libstdc++ aims for
+// blocks of ~512 bytes and libc++ for blocks of ~4k bytes.
+//
+// `absl::chunked_queue` provides the same, minus random access. It is
+// implemented as a doubly-linked list of fixed- or variable-sized blocks.
+//
+// `absl::chunked_queue` is useful when memory usage is paramount, as it
+// provides fine-grained and configurable block sizing.
+//
+// The interface supported by this class is limited to:
+//
+// empty()
+// size()
+// max_size()
+// shrink_to_fit()
+// resize()
+// assign()
+// push_back()
+// emplace_back()
+// pop_front()
+// front()
+// back()
+// swap()
+// clear()
+// begin(), end()
+// cbegin(), cend()
+//
+// === ADVANCED USAGE
+//
+// == clear()
+//
+// As an optimization, clear() leaves the first block of the chunked_queue
+// allocated (but empty), so clear() will not release all memory held by the
+// container. To release it, call shrink_to_fit() or swap the container with an
+// empty one.
+//
+// absl::chunked_queue<int64_t> q = {1, 2, 3};
+// q.clear();
+// q.shrink_to_fit();
+//
+// == block size customization
+//
+// chunked_queue allows customization of the block size for each block. By
+// default the block size is set to 1 element and the size doubles for the next
+// block until it reaches the default max block size, which is 128 elements.
+//
+// = fixed size
+//
+// When only the first block size parameter is specified, it sets a fixed block
+// size for all blocks:
+//
+// chunked_queue<T, 32>: 32 elements per block
+//
+// The smaller the block size, the lower the memory usage for small queues, at
+// the cost of performance. Caveat: for large queues, a smaller block size will
+// increase memory usage and reduce performance.
+//
+// = variable size
+//
+// When both block size parameters are specified, they set the min and max
+// block sizes. The queue starts with the min block size and, as it grows, each
+// new block is double the size of the tail block (so it at least doubles the
+// capacity of the queue) until the max block size is reached.
+//
+// chunked_queue<T, 4, 64>: first block 4 elements, second block 8 elements,
+// third block 16 elements, fourth block 32 elements,
+// all other blocks 64 elements
+//
+// One can specify a min and max such that small queues will not waste memory
+// and large queues will not have too many blocks.
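+//
+// For example (a usage sketch; `int64_t` is an arbitrary element type):
+//
+//   absl::chunked_queue<int64_t> defaults;       // blocks grow from 1 to 128
+//   absl::chunked_queue<int64_t, 32> fixed;      // every block holds 32
+//   absl::chunked_queue<int64_t, 4, 64> bounded; // blocks grow from 4 to 64
+//
+//   bounded.push_back(1);
+//   int64_t v = bounded.front();
+//   bounded.pop_front();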
+
+#ifndef ABSL_CONTAINER_CHUNKED_QUEUE_H_
+#define ABSL_CONTAINER_CHUNKED_QUEUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/iterator_traits.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/chunked_queue.h"
+#include "absl/container/internal/layout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename T, size_t BLo = 0, size_t BHi = BLo,
+ typename Allocator = std::allocator<T>>
+class chunked_queue {
+ public:
+ static constexpr size_t kBlockSizeMin = (BLo == 0 && BHi == 0) ? 1 : BLo;
+ static constexpr size_t kBlockSizeMax = (BLo == 0 && BHi == 0) ? 128 : BHi;
+
+ private:
+ static_assert(kBlockSizeMin > 0, "Min block size cannot be zero");
+ static_assert(kBlockSizeMin <= kBlockSizeMax, "Invalid block size bounds");
+
+ using Block = container_internal::ChunkedQueueBlock<T, Allocator>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+
+ class iterator_common {
+ public:
+ friend bool operator==(const iterator_common& a, const iterator_common& b) {
+ return a.ptr == b.ptr;
+ }
+
+ friend bool operator!=(const iterator_common& a, const iterator_common& b) {
+ return !(a == b);
+ }
+
+ protected:
+ iterator_common() = default;
+ explicit iterator_common(Block* b)
+ : block(b), ptr(b->start()), limit(b->limit()) {}
+
+ void Incr() {
+ // If we do not have a next block, make ptr point one past the end of this
+ // block. If we do have a next block, make ptr point to the first element
+ // of the next block.
+ ++ptr;
+ if (ptr == limit && block->next()) *this = iterator_common(block->next());
+ }
+
+ void IncrBy(size_t n) {
+ while (ptr + n > limit) {
+ n -= limit - ptr;
+ *this = iterator_common(block->next());
+ }
+ ptr += n;
+ }
+
+ Block* block = nullptr;
+ T* ptr = nullptr;
+ T* limit = nullptr;
+ };
+
+ // CT can be either T or const T.
+ template <typename CT>
+ class basic_iterator : public iterator_common {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename AllocatorTraits::value_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+ using pointer =
+ typename std::conditional<std::is_const<CT>::value,
+ typename AllocatorTraits::const_pointer,
+ typename AllocatorTraits::pointer>::type;
+ using reference = CT&;
+
+ basic_iterator() = default;
+
+ // Copy ctor if CT is T.
+ // Otherwise it's a conversion of iterator to const_iterator.
+ basic_iterator(const basic_iterator<T>& it) // NOLINT(runtime/explicit)
+ : iterator_common(it) {}
+
+ basic_iterator& operator=(const basic_iterator& other) = default;
+
+ reference operator*() const { return *this->ptr; }
+ pointer operator->() const { return this->ptr; }
+ basic_iterator& operator++() {
+ this->Incr();
+ return *this;
+ }
+ basic_iterator operator++(int) {
+ basic_iterator t = *this;
+ ++*this;
+ return t;
+ }
+
+ private:
+ explicit basic_iterator(Block* b) : iterator_common(b) {}
+
+ friend chunked_queue;
+ };
+
+ public:
+ using allocator_type = typename AllocatorTraits::allocator_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using iterator = basic_iterator<T>;
+ using const_iterator = basic_iterator<const T>;
+
+ // Constructs an empty queue.
+ chunked_queue() : chunked_queue(allocator_type()) {}
+
+ // Constructs an empty queue with a custom allocator.
+ explicit chunked_queue(const allocator_type& alloc)
+ : alloc_and_size_(alloc) {}
+
+ // Constructs a queue with `count` default-inserted elements.
+ explicit chunked_queue(size_type count,
+ const allocator_type& alloc = allocator_type())
+ : alloc_and_size_(alloc) {
+ resize(count);
+ }
+
+ // Constructs a queue with `count` copies of `value`.
+ chunked_queue(size_type count, const T& value,
+ const allocator_type& alloc = allocator_type())
+ : alloc_and_size_(alloc) {
+ assign(count, value);
+ }
+
+ // Constructs a queue with the contents of the range [first, last).
+ template <typename Iter,
+ typename = std::enable_if_t<
+ base_internal::IsAtLeastInputIterator<Iter>::value>>
+ chunked_queue(Iter first, Iter last,
+ const allocator_type& alloc = allocator_type())
+ : alloc_and_size_(alloc) {
+ using Tag = typename std::iterator_traits<Iter>::iterator_category;
+ RangeInit(first, last, Tag());
+ }
+
+ // Constructs a queue with the contents of the initializer list `list`.
+ chunked_queue(std::initializer_list<T> list,
+ const allocator_type& alloc = allocator_type())
+ : chunked_queue(list.begin(), list.end(), alloc) {}
+
+ ~chunked_queue();
+
+ // Copy constructor.
+ chunked_queue(const chunked_queue& other)
+ : chunked_queue(other,
+ AllocatorTraits::select_on_container_copy_construction(
+ other.alloc_and_size_.allocator())) {}
+
+ // Copy constructor with specific allocator.
+ chunked_queue(const chunked_queue& other, const allocator_type& alloc)
+ : alloc_and_size_(alloc) {
+ for (const_reference item : other) {
+ push_back(item);
+ }
+ }
+
+ // Move constructor.
+ chunked_queue(chunked_queue&& other) noexcept
+ : head_(other.head_),
+ tail_(other.tail_),
+ alloc_and_size_(std::move(other.alloc_and_size_)) {
+ other.head_ = {};
+ other.tail_ = {};
+ other.alloc_and_size_.size = 0;
+ }
+
+ // Replaces contents with those from initializer list `il`.
+ chunked_queue& operator=(std::initializer_list<T> il) {
+ assign(il.begin(), il.end());
+ return *this;
+ }
+
+ // Copy assignment operator.
+ chunked_queue& operator=(const chunked_queue& other) {
+ if (this == &other) {
+ return *this;
+ }
+ if (AllocatorTraits::propagate_on_container_copy_assignment::value &&
+ (alloc_and_size_.allocator() != other.alloc_and_size_.allocator())) {
+ // Destroy all current elements and blocks with the current allocator,
+ // before switching this to use the allocator propagated from "other".
+ DestroyAndDeallocateAll();
+ alloc_and_size_ = AllocatorAndSize(other.alloc_and_size_.allocator());
+ }
+ assign(other.begin(), other.end());
+ return *this;
+ }
+
+ // Move assignment operator.
+ chunked_queue& operator=(chunked_queue&& other) noexcept;
+
+ // Returns true if the queue contains no elements.
+ bool empty() const { return alloc_and_size_.size == 0; }
+
+ // Returns the number of elements in the queue.
+ size_t size() const { return alloc_and_size_.size; }
+
+ // Returns the maximum number of elements the queue is able to hold.
+ size_type max_size() const noexcept {
+ return AllocatorTraits::max_size(alloc_and_size_.allocator());
+ }
+
+ // Resizes the container to contain `new_size` elements.
+ // If `new_size > size()`, additional default-inserted elements are appended.
+ // If `new_size < size()`, elements are removed from the end.
+ void resize(size_t new_size);
+
+ // Resizes the container to contain `new_size` elements.
+ // If `new_size > size()`, additional copies of `value` are appended.
+ // If `new_size < size()`, elements are removed from the end.
+ void resize(size_type new_size, const T& value) {
+ if (new_size > size()) {
+ size_t to_add = new_size - size();
+ for (size_t i = 0; i < to_add; ++i) {
+ push_back(value);
+ }
+ } else {
+ resize(new_size);
+ }
+ }
+
+ // Requests the removal of unused capacity.
+ void shrink_to_fit() {
+    // As an optimization, clear() leaves the first block of the chunked_queue
+    // allocated (but empty). When empty, shrink_to_fit() deallocates the first
+    // block by swapping *this with a newly constructed container that has no
+    // first block.
+ if (empty()) {
+ chunked_queue(alloc_and_size_.allocator()).swap(*this);
+ }
+ }
+
+ // Replaces the contents with copies of those in the range [first, last).
+ template <typename Iter,
+ typename = std::enable_if_t<
+ base_internal::IsAtLeastInputIterator<Iter>::value>>
+ void assign(Iter first, Iter last) {
+ auto out = begin();
+ Block* prev_block = nullptr;
+
+ // Overwrite existing elements.
+ for (; out != end() && first != last; ++first) {
+ // Track the previous block so we can correctly update tail_ if we stop
+ // exactly at a block boundary.
+ if (out.ptr + 1 == out.block->limit()) {
+ prev_block = out.block;
+ }
+ *out = *first;
+ ++out;
+ }
+
+ // If we stopped exactly at the start of a block (meaning the previous block
+ // was full), we must ensure tail_ points to the end of the previous block,
+ // not the start of the current (now empty and to be deleted) block.
+ // This maintains the invariant required by back() which assumes tail_
+ // never points to the start of a block (unless it's the only block).
+ if (!empty() && out.block != nullptr && out.ptr == out.block->start() &&
+ prev_block != nullptr) {
+ // Delete the current block and all subsequent blocks.
+ //
+ // NOTE: Calling EraseAllFrom on an iterator that points to the limit of
+ // the previous block will not delete any element from the previous block.
+ iterator prev_block_end(prev_block);
+ prev_block_end.ptr = prev_block->limit();
+ EraseAllFrom(prev_block_end);
+
+ // Update tail_ to point to the end of the previous block.
+ tail_ = prev_block_end;
+ prev_block->set_next(nullptr);
+ } else {
+ // Standard erase from the current position to the end.
+ EraseAllFrom(out);
+ }
+
+ // Append any remaining new elements.
+ for (; first != last; ++first) {
+ push_back(*first);
+ }
+ }
+
+ // Replaces the contents with `count` copies of `value`.
+ void assign(size_type count, const T& value) {
+ clear();
+ for (size_type i = 0; i < count; ++i) {
+ push_back(value);
+ }
+ }
+
+ // Replaces the contents with the elements from the initializer list `il`.
+ void assign(std::initializer_list<T> il) { assign(il.begin(), il.end()); }
+
+ // Appends the given element value to the end of the container.
+ // Invalidates `end()` iterator. References to other elements remain valid.
+ void push_back(const T& val) { emplace_back(val); }
+ void push_back(T&& val) { emplace_back(std::move(val)); }
+
+ // Appends a new element to the end of the container.
+ // The element is constructed in-place with `args`.
+ // Returns a reference to the new element.
+ // Invalidates `end()` iterator. References to other elements remain valid.
+ template <typename... A>
+ T& emplace_back(A&&... args) {
+ T* storage = AllocateBack();
+ AllocatorTraits::construct(alloc_and_size_.allocator(), storage,
+ std::forward<A>(args)...);
+ return *storage;
+ }
+
+ // Removes the first element of the container.
+ // Invalidates iterators to the removed element.
+ // REQUIRES: !empty()
+ void pop_front();
+
+ // Returns a reference to the first element in the container.
+ // REQUIRES: !empty()
+ T& front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return *head_;
+ }
+ const T& front() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return *head_;
+ }
+
+ // Returns a reference to the last element in the container.
+ // REQUIRES: !empty()
+ T& back() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return *(&*tail_ - 1);
+ }
+ const T& back() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return *(&*tail_ - 1);
+ }
+
+ // Swaps the contents of this queue with `other`.
+ void swap(chunked_queue& other) noexcept {
+ using std::swap;
+ swap(head_, other.head_);
+ swap(tail_, other.tail_);
+ if (AllocatorTraits::propagate_on_container_swap::value) {
+ swap(alloc_and_size_, other.alloc_and_size_);
+ } else {
+ // Swap only the sizes; each object keeps its allocator.
+ //
+ // (It is undefined behavior to swap between two containers with unequal
+ // allocators if propagate_on_container_swap is false, so we don't have to
+ // handle that here like we do in the move-assignment operator.)
+ ABSL_HARDENING_ASSERT(get_allocator() == other.get_allocator());
+ swap(alloc_and_size_.size, other.alloc_and_size_.size);
+ }
+ }
+
+ // Erases all elements from the container.
+ // Note: Leaves one empty block allocated as an optimization.
+ // To free all memory, call shrink_to_fit() after calling clear().
+ void clear();
+
+ iterator begin() { return head_; }
+ iterator end() { return tail_; }
+
+ const_iterator begin() const { return head_; }
+ const_iterator end() const { return tail_; }
+
+ const_iterator cbegin() const { return head_; }
+ const_iterator cend() const { return tail_; }
+
+ // Returns the allocator associated with the container.
+ allocator_type get_allocator() const { return alloc_and_size_.allocator(); }
+
+ private:
+ // Empty base-class optimization: bundle storage for our allocator together
+ // with a field we had to store anyway (size), via inheriting from the
+ // allocator, so this allocator instance doesn't consume any storage
+ // when its type has no data members.
+ struct AllocatorAndSize : private allocator_type {
+ explicit AllocatorAndSize(const allocator_type& alloc)
+ : allocator_type(alloc) {}
+ const allocator_type& allocator() const { return *this; }
+ allocator_type& allocator() { return *this; }
+ size_t size = 0;
+ };
+
+ template <typename Iter>
+ void RangeInit(Iter first, Iter last, std::input_iterator_tag) {
+ while (first != last) {
+ AddTailBlock();
+ for (; first != last && tail_.ptr != tail_.limit;
+ ++alloc_and_size_.size, ++tail_.ptr, ++first) {
+ AllocatorTraits::construct(alloc_and_size_.allocator(), tail_.ptr,
+ *first);
+ }
+ }
+ }
+
+ void Construct(T* start, T* limit) {
+ ABSL_ASSERT(start <= limit);
+ for (; start != limit; ++start) {
+ AllocatorTraits::construct(alloc_and_size_.allocator(), start);
+ }
+ }
+
+ size_t Destroy(T* start, T* limit) {
+ ABSL_ASSERT(start <= limit);
+ const size_t n = limit - start;
+ for (; start != limit; ++start) {
+ AllocatorTraits::destroy(alloc_and_size_.allocator(), start);
+ }
+ return n;
+ }
+
+ T* block_begin(Block* b) const {
+ return b == head_.block ? head_.ptr : b->start();
+ }
+ T* block_end(Block* b) const {
+ // We have the choice of !b->next or b == tail_.block to determine if b is
+ // the tail or not. !b->next is usually faster because the caller of
+ // block_end() is most likely traversing the list of blocks so b->next is
+ // already fetched into some register.
+ return !b->next() ? tail_.ptr : b->limit();
+ }
+
+ void AddTailBlock();
+ size_t NewBlockSize() {
+ // Double the last block size and bound to [kBlockSizeMin, kBlockSizeMax].
+ if (!tail_.block) return kBlockSizeMin;
+ return (std::min)(kBlockSizeMax, 2 * tail_.block->size());
+ }
+
+ T* AllocateBack();
+ void EraseAllFrom(iterator i);
+
+ // Destroys any contained elements and destroys all allocated storage.
+ // (Like clear(), except this doesn't leave any empty blocks behind.)
+ void DestroyAndDeallocateAll();
+
+ // The set of elements in the queue is the following:
+ //
+ // (1) When we have just one block:
+ // [head_.ptr .. tail_.ptr-1]
+ // (2) When we have multiple blocks:
+ // [head_.ptr .. head_.limit-1]
+ // ... concatenation of all elements from interior blocks ...
+ // [tail_.ptr .. tail_.limit-1]
+ //
+ // Rep invariants:
+  //   When we have just one block:
+  //     head_.limit == tail_.limit == head_.block->limit()
+ // Always:
+ // head_.ptr <= head_.limit
+ // tail_.ptr <= tail_.limit
+
+ iterator head_;
+ iterator tail_;
+ AllocatorAndSize alloc_and_size_;
+};
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMin;
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMax;
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void swap(chunked_queue<T, BLo, BHi, Allocator>& a,
+ chunked_queue<T, BLo, BHi, Allocator>& b) noexcept {
+ a.swap(b);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+chunked_queue<T, BLo, BHi, Allocator>&
+chunked_queue<T, BLo, BHi, Allocator>::operator=(
+ chunked_queue&& other) noexcept {
+ if (this == &other) {
+ return *this;
+ }
+ DestroyAndDeallocateAll();
+
+ if constexpr (AllocatorTraits::propagate_on_container_move_assignment::
+ value) {
+ // Take over the storage of "other", along with its allocator.
+ head_ = other.head_;
+ tail_ = other.tail_;
+ alloc_and_size_ = std::move(other.alloc_and_size_);
+ other.head_ = {};
+ other.tail_ = {};
+ other.alloc_and_size_.size = 0;
+ } else if (get_allocator() == other.get_allocator()) {
+ // Take over the storage of "other", with which we share an allocator.
+ head_ = other.head_;
+ tail_ = other.tail_;
+ alloc_and_size_.size = other.alloc_and_size_.size;
+ other.head_ = {};
+ other.tail_ = {};
+ other.alloc_and_size_.size = 0;
+ } else {
+    // We cannot take over the storage of "other", since it has a different
+ // allocator; we're stuck move-assigning elements individually.
+ for (auto& elem : other) {
+ push_back(std::move(elem));
+ }
+ }
+ return *this;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline chunked_queue<T, BLo, BHi, Allocator>::~chunked_queue() {
+ Block* b = head_.block;
+ while (b) {
+ Block* next = b->next();
+ Destroy(block_begin(b), block_end(b));
+ Block::Delete(b, &alloc_and_size_.allocator());
+ b = next;
+ }
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+void chunked_queue<T, BLo, BHi, Allocator>::resize(size_t new_size) {
+ while (new_size > size()) {
+ ptrdiff_t to_add = new_size - size();
+ if (tail_.ptr == tail_.limit) {
+ AddTailBlock();
+ }
+ T* start = tail_.ptr;
+ T* limit = (std::min)(tail_.limit, start + to_add);
+ Construct(start, limit);
+ tail_.ptr = limit;
+ alloc_and_size_.size += limit - start;
+ }
+ if (size() == new_size) {
+ return;
+ }
+ ABSL_ASSERT(new_size < size());
+ auto new_end = begin();
+ new_end.IncrBy(new_size);
+ ABSL_ASSERT(new_end != end());
+ EraseAllFrom(new_end);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::AddTailBlock() {
+ ABSL_ASSERT(tail_.ptr == tail_.limit);
+ auto* b = Block::New(NewBlockSize(), &alloc_and_size_.allocator());
+ if (!head_.block) {
+ ABSL_ASSERT(!tail_.block);
+ head_ = iterator(b);
+ } else {
+ ABSL_ASSERT(tail_.block);
+ tail_.block->set_next(b);
+ }
+ tail_ = iterator(b);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline T* chunked_queue<T, BLo, BHi, Allocator>::AllocateBack() {
+ if (tail_.ptr == tail_.limit) {
+ AddTailBlock();
+ }
+ ++alloc_and_size_.size;
+ return tail_.ptr++;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::EraseAllFrom(iterator i) {
+ if (!i.block) {
+ return;
+ }
+ ABSL_ASSERT(i.ptr);
+ ABSL_ASSERT(i.limit);
+ alloc_and_size_.size -= Destroy(i.ptr, block_end(i.block));
+ Block* b = i.block->next();
+ while (b) {
+ Block* next = b->next();
+ alloc_and_size_.size -= Destroy(b->start(), block_end(b));
+ Block::Delete(b, &alloc_and_size_.allocator());
+ b = next;
+ }
+ tail_ = i;
+ tail_.block->set_next(nullptr);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::DestroyAndDeallocateAll() {
+ Block* b = head_.block;
+ while (b) {
+ Block* next = b->next();
+ Destroy(block_begin(b), block_end(b));
+ Block::Delete(b, &alloc_and_size_.allocator());
+ b = next;
+ }
+ head_ = iterator();
+ tail_ = iterator();
+ alloc_and_size_.size = 0;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::pop_front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ ABSL_ASSERT(head_.block);
+ AllocatorTraits::destroy(alloc_and_size_.allocator(), head_.ptr);
+ ++head_.ptr;
+ --alloc_and_size_.size;
+ if (empty()) {
+ // Reset head and tail to the start of the (only) block.
+ ABSL_ASSERT(head_.block == tail_.block);
+ head_.ptr = tail_.ptr = head_.block->start();
+ return;
+ }
+ if (head_.ptr == head_.limit) {
+ Block* n = head_.block->next();
+ Block::Delete(head_.block, &alloc_and_size_.allocator());
+ head_ = iterator(n);
+ }
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+void chunked_queue<T, BLo, BHi, Allocator>::clear() {
+ // NOTE: As an optimization we leave one block allocated.
+ Block* b = head_.block;
+ if (!b) {
+ ABSL_ASSERT(empty());
+ return;
+ }
+ while (b) {
+ Block* next = b->next();
+ Destroy(block_begin(b), block_end(b));
+ if (head_.block != b) {
+ Block::Delete(b, &alloc_and_size_.allocator());
+ }
+ b = next;
+ }
+ b = head_.block;
+ b->set_next(nullptr);
+ head_ = tail_ = iterator(b);
+ alloc_and_size_.size = 0;
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_CHUNKED_QUEUE_H_
diff --git a/absl/container/chunked_queue_benchmark.cc b/absl/container/chunked_queue_benchmark.cc
new file mode 100644
index 0000000..ee4d3c1
--- /dev/null
+++ b/absl/container/chunked_queue_benchmark.cc
@@ -0,0 +1,386 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <forward_list>
+#include <list>
+#include <random>
+
+#include "absl/container/chunked_queue.h"
+#include "absl/random/random.h"
+#include "absl/status/status.h"
+#include "absl/strings/cord.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+// Queue implementation using std::forward_list. Used to benchmark
+// absl::chunked_queue against another plausible implementation.
+template <typename T>
+class forward_list_queue {
+ public:
+ using iterator = typename std::forward_list<T>::iterator;
+
+ forward_list_queue() = default;
+ ~forward_list_queue() = default;
+
+ template <typename... Args>
+ void emplace_back(Args&&... args) {
+ if (list_.empty()) {
+ list_.emplace_front(std::forward<Args>(args)...);
+ tail_ = list_.begin();
+ } else {
+ list_.emplace_after(tail_, std::forward<Args>(args)...);
+ ++tail_;
+ }
+ }
+
+ void push_back(const T& value) { emplace_back(value); }
+ iterator begin() { return list_.begin(); }
+ iterator end() { return list_.end(); }
+ T& front() { return list_.front(); }
+ const T& front() const { return list_.front(); }
+ void pop_front() { list_.pop_front(); }
+ bool empty() const { return list_.empty(); }
+ void clear() { list_.clear(); }
+
+ private:
+ std::forward_list<T> list_;
+ typename std::forward_list<T>::iterator tail_;
+};
+
+template <class T>
+using Deque = std::deque<T>;
+template <class T>
+using List = std::list<T>;
+template <class T>
+using FwdList = forward_list_queue<T>;
+template <class T>
+using Chunked = absl::chunked_queue<T>;
+template <class T>
+using ExpChunked = absl::chunked_queue<T, 2, 64>;
+
+class Element {
+ public:
+ Element() : Element(-1) {}
+ Element(int type) : type_(type) {} // NOLINT
+ operator int() const { return type_; } // NOLINT
+
+ private:
+ int type_;
+ absl::Cord item_;
+ absl::Status status_;
+};
+
+template <class Q>
+Q MakeQueue(int64_t num_elements) {
+ Q q;
+ for (int64_t i = 0; i < num_elements; i++) {
+ q.push_back(static_cast<int>(i));
+ }
+ return q;
+}
+
+void CustomArgs(benchmark::internal::Benchmark* b) {
+ b->Arg(1 << 4);
+ b->Arg(1 << 10);
+ b->Arg(1 << 17);
+}
+
+template <class Q>
+void BM_construct(benchmark::State& state) {
+ for (auto s : state) {
+ Q q;
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_construct, Deque<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, List<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, FwdList<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, Chunked<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, ExpChunked<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, Deque<Element>);
+BENCHMARK_TEMPLATE(BM_construct, List<Element>);
+BENCHMARK_TEMPLATE(BM_construct, FwdList<Element>);
+BENCHMARK_TEMPLATE(BM_construct, Chunked<Element>);
+BENCHMARK_TEMPLATE(BM_construct, ExpChunked<Element>);
+
+template <class Q>
+void BM_destroy(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ for (auto s : state) {
+ state.PauseTiming();
+ {
+ Q q = MakeQueue<Q>(num_elements);
+ benchmark::DoNotOptimize(q);
+ state.ResumeTiming();
+ }
+ }
+ state.SetItemsProcessed(state.iterations() * num_elements);
+}
+
+BENCHMARK_TEMPLATE(BM_destroy, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_push_back(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q;
+ state.ResumeTiming();
+ for (int j = 0; j < num_elements; j++) q.push_back(j);
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_push_back, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_pop_front(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q = MakeQueue<Q>(num_elements);
+ state.ResumeTiming();
+ for (int j = 0; j < num_elements; j++) q.pop_front();
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_pop_front, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_clear(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q = MakeQueue<Q>(num_elements);
+ state.ResumeTiming();
+ q.clear();
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_clear, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_iter(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+    Q q = MakeQueue<Q>(num_elements);
+ int sum = 0;
+ state.ResumeTiming();
+ for (const auto& v : q) sum += v;
+ benchmark::DoNotOptimize(sum);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_iter, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_resize_shrink(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q = MakeQueue<Q>(num_elements * 2);
+ state.ResumeTiming();
+ q.resize(num_elements);
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+// FwdList does not support resize.
+BENCHMARK_TEMPLATE(BM_resize_shrink, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, List<int64_t>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_shrink, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, List<Element>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_shrink, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_resize_grow(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q = MakeQueue<Q>(num_elements);
+ state.ResumeTiming();
+ q.resize(static_cast<size_t>(num_elements) * 2);
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+// FwdList does not support resize.
+BENCHMARK_TEMPLATE(BM_resize_grow, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, List<int64_t>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_grow, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, List<Element>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_grow, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_assign_shrink(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ const Q src = MakeQueue<Q>(num_elements);
+ Q dst = MakeQueue<Q>(num_elements * 2);
+ state.ResumeTiming();
+ dst = src;
+ benchmark::DoNotOptimize(dst);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_assign_shrink, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_assign_grow(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+ for (auto s : state) {
+ state.PauseTiming();
+ const Q src = MakeQueue<Q>(num_elements * 2);
+ Q dst = MakeQueue<Q>(num_elements);
+ state.ResumeTiming();
+ dst = src;
+ benchmark::DoNotOptimize(dst);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_assign_grow, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, ExpChunked<Element>)->Apply(CustomArgs);
+
+template <class Q>
+void BM_push_pop(benchmark::State& state) {
+ const int64_t num_elements = state.range(0);
+
+ state.SetItemsProcessed(state.max_iterations * num_elements);
+
+ std::mt19937 rnd;
+ for (auto s : state) {
+ state.PauseTiming();
+ Q q;
+ state.ResumeTiming();
+ for (int j = 0; j < num_elements; j++) {
+ if (q.empty() || absl::Bernoulli(rnd, 0.5)) {
+ q.push_back(state.iterations());
+ } else {
+ q.pop_front();
+ }
+ }
+ benchmark::DoNotOptimize(q);
+ }
+}
+
+BENCHMARK_TEMPLATE(BM_push_pop, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, ExpChunked<Element>)->Apply(CustomArgs);
+
+} // namespace
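For readers skimming the benchmark additions: BM_push_pop above measures FIFO steady state by interleaving push_back and pop_front at random. A minimal standalone reduction of that pattern, assuming Google Benchmark and std::deque as a stand-in (BM_push_pop_deque is an illustrative name, not part of this roll):

#include <cstdint>
#include <deque>
#include <random>

#include "absl/random/distributions.h"
#include "benchmark/benchmark.h"

static void BM_push_pop_deque(benchmark::State& state) {
  const int64_t num_elements = state.range(0);
  std::mt19937 rnd;
  for (auto s : state) {
    std::deque<int64_t> q;
    for (int64_t j = 0; j < num_elements; ++j) {
      // Pop only when non-empty; otherwise flip a fair coin.
      if (q.empty() || absl::Bernoulli(rnd, 0.5)) {
        q.push_back(j);
      } else {
        q.pop_front();
      }
    }
    benchmark::DoNotOptimize(q);
  }
}
BENCHMARK(BM_push_pop_deque)->Arg(1 << 10);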
diff --git a/absl/container/chunked_queue_test.cc b/absl/container/chunked_queue_test.cc
new file mode 100644
index 0000000..a66d0c2
--- /dev/null
+++ b/absl/container/chunked_queue_test.cc
@@ -0,0 +1,768 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/chunked_queue.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <forward_list>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/test_allocator.h"
+#include "absl/strings/str_cat.h"
+
+using ::testing::ElementsAre;
+using ::testing::Pair;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+
+// Hide in a namespace to make sure swap is found via ADL.
+namespace adl_namespace {
+namespace {
+TEST(ChunkedQueueADLTest, Swap) {
+ absl::chunked_queue<int64_t> q1;
+ absl::chunked_queue<int64_t> q2;
+ q1.push_back(4);
+ q2.push_back(5);
+ q2.push_back(6);
+ swap(q1, q2);
+ EXPECT_THAT(q1, ElementsAre(5, 6));
+ EXPECT_THAT(q2, ElementsAre(4));
+}
+} // namespace
+} // namespace adl_namespace
+
+namespace {
+
+template <class T>
+using ChunkedQueueBlock =
+ absl::container_internal::ChunkedQueueBlock<T, std::allocator<T>>;
+
+TEST(Internal, elements_in_bytes) {
+ EXPECT_EQ(size_t{1}, ChunkedQueueBlock<int>::block_size_from_bytes(0));
+ EXPECT_EQ(size_t{1}, ChunkedQueueBlock<int>::block_size_from_bytes(
+ sizeof(ChunkedQueueBlock<int>)));
+ EXPECT_EQ(size_t{1},
+ ChunkedQueueBlock<int>::block_size_from_bytes(sizeof(int)));
+ EXPECT_EQ(size_t{2}, ChunkedQueueBlock<int>::block_size_from_bytes(
+ sizeof(ChunkedQueueBlock<int>) + 2 * sizeof(int)));
+}
+
+TEST(Internal, BlockSizedDelete) {
+ struct Item {
+ int i;
+ char c;
+ };
+ std::allocator<Item> allocator;
+ auto* block = ChunkedQueueBlock<Item>::New(3, &allocator);
+ ChunkedQueueBlock<Item>::Delete(block, &allocator);
+}
+
+template <size_t elem_size>
+void BlockSizeRounding() {
+ struct Elem {
+ char data[elem_size];
+ };
+ typedef ChunkedQueueBlock<Elem> Block;
+ for (size_t n = 1; n < 100; ++n) {
+ SCOPED_TRACE(n);
+ std::allocator<Elem> allocator;
+ Block* b = Block::New(n, &allocator);
+ EXPECT_GE(b->size(), n);
+ Block::Delete(b, &allocator);
+ }
+}
+
+TEST(Internal, BlockSizeRounding1) { BlockSizeRounding<1>(); }
+TEST(Internal, BlockSizeRounding17) { BlockSizeRounding<17>(); }
+TEST(Internal, BlockSizeRounding101) { BlockSizeRounding<101>(); }
+TEST(Internal, BlockSizeRounding528) { BlockSizeRounding<528>(); }
+
+TEST(ChunkedQueue, MinMaxBlockSize) {
+ absl::chunked_queue<int64_t, 1, 2> q = {1, 2, 3};
+ EXPECT_THAT(q, ElementsAre(1, 2, 3));
+}
+
+TEST(ChunkedQueue, Empty) {
+ absl::chunked_queue<int64_t> q;
+ EXPECT_TRUE(q.empty());
+ q.push_back(10);
+ EXPECT_FALSE(q.empty());
+ EXPECT_EQ(q.front(), 10);
+ EXPECT_EQ(q.back(), 10);
+ q.pop_front();
+ EXPECT_TRUE(q.empty());
+ q.clear();
+ EXPECT_TRUE(q.empty());
+}
+
+TEST(ChunkedQueue, CopyConstruct) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r(q);
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, CopyConstructMultipleChunks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(1);
+ q.push_back(2);
+ q.push_back(3);
+ absl::chunked_queue<int64_t, 2> r(q);
+ EXPECT_THAT(r, ElementsAre(1, 2, 3));
+ EXPECT_EQ(3, r.size());
+}
+
+TEST(ChunkedQueue, BeginEndConstruct) {
+ std::vector<int64_t> src = {1, 2, 3, 4, 5};
+ absl::chunked_queue<int64_t, 2> q(src.begin(), src.end());
+ EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+ EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, InitializerListConstruct) {
+ absl::chunked_queue<int64_t, 2> q = {1, 2, 3, 4, 5};
+ EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+ EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, CountConstruct) {
+ absl::chunked_queue<int64_t> q(3);
+ EXPECT_THAT(q, ElementsAre(0, 0, 0));
+ EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, CountValueConstruct) {
+ absl::chunked_queue<int64_t> q(3, 10);
+ EXPECT_THAT(q, ElementsAre(10, 10, 10));
+ EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, InitializerListAssign) {
+ absl::chunked_queue<int64_t, 2> q;
+ q = {1, 2, 3, 4, 5};
+ EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+ EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, CopyAssign) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r = q;
+ EXPECT_THAT(r, ElementsAre(1));
+}
+
+TEST(ChunkedQueue, CopyAssignSelf) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ q = *&q; // Avoid -Wself-assign.
+ EXPECT_THAT(q, ElementsAre(1));
+ EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, CopyAssignDestinationBigger) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r;
+ r.push_back(9);
+ r.push_back(9);
+ r.push_back(9);
+ r = q;
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, CopyAssignSourceBiggerMultipleChunks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(1);
+ q.push_back(2);
+ q.push_back(3);
+ absl::chunked_queue<int64_t, 2> r;
+ r.push_back(9);
+ r = q;
+ EXPECT_THAT(r, ElementsAre(1, 2, 3));
+ EXPECT_EQ(3, r.size());
+}
+
+TEST(ChunkedQueue, CopyAssignDestinationBiggerMultipleChunks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t, 2> r;
+ r.push_back(9);
+ r.push_back(9);
+ r.push_back(9);
+ r = q;
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, AssignCountValue) {
+ absl::chunked_queue<int64_t> q;
+ q.assign(3, 10);
+ EXPECT_THAT(q, ElementsAre(10, 10, 10));
+ EXPECT_EQ(3, q.size());
+
+ q.assign(2, 20);
+ EXPECT_THAT(q, ElementsAre(20, 20));
+ EXPECT_EQ(2, q.size());
+}
+
+TEST(ChunkedQueue, MoveConstruct) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r(std::move(q));
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, MoveAssign) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r;
+ r = std::move(q);
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, MoveAssignImmovable) {
+ struct Immovable {
+ Immovable() = default;
+
+ Immovable(const Immovable&) = delete;
+ Immovable& operator=(const Immovable&) = delete;
+ Immovable(Immovable&&) = delete;
+ Immovable& operator=(Immovable&&) = delete;
+ };
+ absl::chunked_queue<Immovable> q;
+ q.emplace_back();
+ absl::chunked_queue<Immovable> r;
+ r = std::move(q);
+ EXPECT_THAT(r, SizeIs(1));
+}
+
+TEST(ChunkedQueue, MoveAssignSelf) {
+ absl::chunked_queue<int64_t> q;
+ absl::chunked_queue<int64_t>& q2 = q;
+ q.push_back(1);
+ q = std::move(q2);
+ EXPECT_THAT(q, ElementsAre(1));
+ EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, MoveAssignDestinationBigger) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t> r;
+ r.push_back(9);
+ r.push_back(9);
+ r.push_back(9);
+ r = std::move(q);
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, MoveAssignDestinationBiggerMultipleChunks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(1);
+ absl::chunked_queue<int64_t, 2> r;
+ r.push_back(9);
+ r.push_back(9);
+ r.push_back(9);
+ r = std::move(q);
+ EXPECT_THAT(r, ElementsAre(1));
+ EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, ConstFrontBack) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(10);
+ EXPECT_EQ(q.front(), 10);
+ EXPECT_EQ(q.back(), 10);
+ q.front() = 12;
+ EXPECT_EQ(q.front(), 12);
+ EXPECT_EQ(q.back(), 12);
+
+ const absl::chunked_queue<int64_t>& qref = q;
+ EXPECT_EQ(qref.front(), 12);
+ EXPECT_EQ(qref.back(), 12);
+
+ q.pop_front();
+
+  // Test at block boundary and beyond.
+ for (int i = 0; i < 64; ++i) q.push_back(i + 10);
+ EXPECT_EQ(q.front(), 10);
+ EXPECT_EQ(q.back(), 73);
+
+ for (int i = 64; i < 128; ++i) q.push_back(i + 10);
+ EXPECT_EQ(q.front(), 10);
+ EXPECT_EQ(q.back(), 137);
+ q.clear();
+ EXPECT_TRUE(q.empty());
+}
+
+TEST(ChunkedQueue, PushAndPop) {
+ absl::chunked_queue<int64_t> q;
+ EXPECT_TRUE(q.empty());
+ EXPECT_EQ(0, q.size());
+ for (int i = 0; i < 10000; i++) {
+ q.push_back(i);
+ EXPECT_EQ(q.front(), 0) << ": iteration " << i;
+ EXPECT_FALSE(q.empty());
+ EXPECT_EQ(i + 1, q.size());
+ }
+ for (int i = 0; i < 10000; i++) {
+ EXPECT_FALSE(q.empty());
+ EXPECT_EQ(10000 - i, q.size());
+ EXPECT_EQ(q.front(), i);
+ q.pop_front();
+ }
+ EXPECT_TRUE(q.empty());
+ EXPECT_EQ(0, q.size());
+}
+
+TEST(ChunkedQueue, Swap) {
+ absl::chunked_queue<int64_t> q1;
+ absl::chunked_queue<int64_t> q2;
+ q1.push_back(4);
+ q2.push_back(5);
+ q2.push_back(6);
+ q2.swap(q1);
+ EXPECT_EQ(2, q1.size());
+ EXPECT_EQ(5, q1.front());
+ EXPECT_EQ(1, q2.size());
+ EXPECT_EQ(4, q2.front());
+ q1.pop_front();
+ q1.swap(q2);
+ EXPECT_EQ(1, q1.size());
+ EXPECT_EQ(4, q1.front());
+ EXPECT_EQ(1, q2.size());
+ EXPECT_EQ(6, q2.front());
+ q1.pop_front();
+ q1.swap(q2);
+ EXPECT_EQ(1, q1.size());
+ EXPECT_EQ(6, q1.front());
+ EXPECT_EQ(0, q2.size());
+ q1.clear();
+ EXPECT_TRUE(q1.empty());
+}
+
+TEST(ChunkedQueue, ShrinkToFit) {
+ absl::chunked_queue<int64_t> q;
+ q.shrink_to_fit(); // Should work on empty
+ EXPECT_TRUE(q.empty());
+
+ q.push_back(1);
+ q.shrink_to_fit(); // Should work on non-empty
+ EXPECT_THAT(q, ElementsAre(1));
+
+ q.clear();
+  // We know clear() leaves a block behind, and shrink_to_fit() should remove
+  // it. The internal memory state is hard to test without mocks or
+  // inspection, but at least we verify this doesn't crash or corrupt state.
+ q.shrink_to_fit();
+ EXPECT_TRUE(q.empty());
+}
+
+TEST(ChunkedQueue, ResizeExtends) {
+ absl::chunked_queue<int64_t> q;
+ q.resize(2);
+ EXPECT_THAT(q, ElementsAre(0, 0));
+ EXPECT_EQ(2, q.size());
+}
+
+TEST(ChunkedQueue, ResizeShrinks) {
+ absl::chunked_queue<int64_t> q;
+ q.push_back(1);
+ q.push_back(2);
+ q.resize(1);
+ EXPECT_THAT(q, ElementsAre(1));
+ EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, ResizeExtendsMultipleBlocks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.resize(3);
+ EXPECT_THAT(q, ElementsAre(0, 0, 0));
+ EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, ResizeShrinksMultipleBlocks) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(1);
+ q.push_back(2);
+ q.push_back(3);
+ q.resize(1);
+ EXPECT_THAT(q, ElementsAre(1));
+ EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, ResizeValue) {
+ absl::chunked_queue<int64_t> q;
+ q.resize(3, 10);
+ EXPECT_THAT(q, ElementsAre(10, 10, 10));
+ EXPECT_EQ(3, q.size());
+
+ q.resize(5, 20);
+ EXPECT_THAT(q, ElementsAre(10, 10, 10, 20, 20));
+ EXPECT_EQ(5, q.size());
+
+ q.resize(2, 30);
+ EXPECT_THAT(q, ElementsAre(10, 10));
+ EXPECT_EQ(2, q.size());
+}
+
+TEST(ChunkedQueue, MaxSize) {
+ absl::chunked_queue<int64_t> q;
+ EXPECT_GE(q.max_size(),
+ size_t{1} << (sizeof(size_t) * 8 - sizeof(int64_t) - 4));
+}
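(For a 64-bit size_t that lower bound works out to size_t{1} << (64 - 8 - 4), i.e. 2^52 elements.)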
+
+TEST(ChunkedQueue, AssignExtends) {
+ absl::chunked_queue<int64_t, 2> q;
+ std::vector<int64_t> v = {1, 2, 3, 4, 5};
+ q.assign(v.begin(), v.end());
+ EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+ EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, AssignShrinks) {
+ absl::chunked_queue<int64_t, 2> q = {1, 2, 3, 4, 5};
+ std::vector<int64_t> v = {1};
+ q.assign(v.begin(), v.end());
+ EXPECT_THAT(q, ElementsAre(1));
+ EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, AssignBoundaryCondition) {
+ // Create a queue with fixed block size of 4.
+ // 3 blocks: [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]
+ absl::chunked_queue<int, 4> q = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+
+ // Assign a range that fills exactly the first block (4 elements).
+ // This triggers the boundary condition where the assignment loop ends
+ // exactly at the limit of the first block.
+ std::vector<int> v = {101, 102, 103, 104};
+ q.assign(v.begin(), v.end());
+
+ EXPECT_EQ(q.size(), 4);
+ EXPECT_EQ(q.front(), 101);
+ // Verify back() is valid. If tail_ was incorrectly pointing to the start
+ // of the (now deleted) second block, this might access invalid memory
+ // or fail assertions.
+ EXPECT_EQ(q.back(), 104);
+
+ // Verify we can continue to push elements correctly.
+ q.push_back(105);
+ EXPECT_EQ(q.size(), 5);
+ EXPECT_EQ(q.back(), 105);
+}
+
+TEST(ChunkedQueue, Iterator) {
+ absl::chunked_queue<int64_t> q;
+ EXPECT_TRUE(q.begin() == q.end());
+
+ q.push_back(1);
+ absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+ ASSERT_FALSE(iter == q.end());
+ ASSERT_EQ(*iter, 1);
+ ++iter;
+ ASSERT_TRUE(iter == q.end());
+
+ q.push_back(2);
+ iter = q.begin();
+ ASSERT_EQ(*iter, 1);
+ ++iter;
+ absl::chunked_queue<int64_t>::const_iterator copy_iter = iter;
+ ASSERT_FALSE(copy_iter == q.end());
+ ASSERT_EQ(*copy_iter, 2);
+ ++copy_iter;
+ ASSERT_TRUE(copy_iter == q.end());
+
+ copy_iter = iter;
+ ASSERT_FALSE(iter == q.end());
+ ASSERT_EQ(*iter, 2);
+ ++iter;
+ ASSERT_TRUE(iter == q.end());
+
+ ASSERT_FALSE(copy_iter == q.end());
+ ASSERT_EQ(*copy_iter, 2);
+ ++copy_iter;
+ ASSERT_TRUE(copy_iter == q.end());
+}
+
+TEST(ChunkedQueue, IteratorDefaultConstructor) {
+ using ConstIter = absl::chunked_queue<int64_t>::const_iterator;
+ using Iter = absl::chunked_queue<int64_t>::iterator;
+ ConstIter const_iter;
+ EXPECT_TRUE(const_iter == ConstIter());
+ Iter iter;
+ EXPECT_TRUE(iter == Iter());
+}
+
+TEST(ChunkedQueue, IteratorConversion) {
+ using ConstIter = absl::chunked_queue<int64_t>::const_iterator;
+ using Iter = absl::chunked_queue<int64_t>::iterator;
+ EXPECT_FALSE((std::is_convertible<ConstIter, Iter>::value));
+ EXPECT_TRUE((std::is_convertible<Iter, ConstIter>::value));
+ absl::chunked_queue<int64_t> q;
+ ConstIter it1 = q.begin();
+ ConstIter it2 = q.cbegin();
+ Iter it3 = q.begin();
+ it1 = q.end();
+ it2 = q.cend();
+ it3 = q.end();
+ EXPECT_FALSE((std::is_assignable<Iter, ConstIter>::value));
+}
+
+struct TestEntry {
+ int x, y;
+};
+
+TEST(ChunkedQueue, Iterator2) {
+ absl::chunked_queue<TestEntry> q;
+ TestEntry e;
+ e.x = 1;
+ e.y = 2;
+ q.push_back(e);
+ e.x = 3;
+ e.y = 4;
+ q.push_back(e);
+
+ absl::chunked_queue<TestEntry>::const_iterator iter = q.begin();
+ EXPECT_EQ(iter->x, 1);
+ EXPECT_EQ(iter->y, 2);
+ ++iter;
+ EXPECT_EQ(iter->x, 3);
+ EXPECT_EQ(iter->y, 4);
+ ++iter;
+ EXPECT_TRUE(iter == q.end());
+}
+
+TEST(ChunkedQueue, Iterator_MultipleBlocks) {
+ absl::chunked_queue<int64_t> q;
+ for (int i = 0; i < 130; ++i) {
+ absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+ for (int j = 0; j < i; ++j) {
+ ASSERT_FALSE(iter == q.end());
+ EXPECT_EQ(*iter, j);
+ ++iter;
+ }
+ ASSERT_TRUE(iter == q.end());
+ q.push_back(i);
+ }
+
+ for (int i = 0; i < 130; ++i) {
+ absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+ for (int j = i; j < 130; ++j) {
+ ASSERT_FALSE(iter == q.end());
+ EXPECT_EQ(*iter, j);
+ ++iter;
+ }
+ q.pop_front();
+ }
+ EXPECT_TRUE(q.empty());
+ EXPECT_TRUE(q.begin() == q.end());
+}
+
+TEST(ChunkedQueue, Iterator_PopFrontInvalidate) {
+ absl::chunked_queue<int64_t> q;
+ for (int i = 0; i < 130; ++i) {
+ q.push_back(i);
+ }
+
+ auto iter = q.begin();
+ for (int i = 0; i < 130; ++i) {
+ auto prev = iter++;
+ ASSERT_FALSE(prev == q.end());
+ EXPECT_EQ(*prev, i);
+ q.pop_front();
+ }
+ ASSERT_TRUE(q.empty());
+}
+
+TEST(ChunkedQueue, Iterator_PushBackInvalidate) {
+ absl::chunked_queue<int64_t, 2> q;
+ q.push_back(0);
+ auto i = q.begin();
+ EXPECT_EQ(*i, 0);
+ q.push_back(1);
+ EXPECT_EQ(*++i, 1);
+ q.push_back(2);
+ EXPECT_EQ(*++i, 2);
+}
+
+struct MyType {
+ static int constructor_calls;
+ static int destructor_calls;
+
+ explicit MyType(int x) : val(x) { constructor_calls++; }
+ MyType(const MyType& t) : val(t.val) { constructor_calls++; }
+ ~MyType() { destructor_calls++; }
+
+ int val;
+};
+
+int MyType::constructor_calls = 0;
+int MyType::destructor_calls = 0;
+
+TEST(ChunkedQueue, ConstructorDestructorCalls) {
+ for (int i = 0; i < 100; i++) {
+ std::vector<MyType> vals;
+ for (int j = 0; j < i; j++) {
+ vals.push_back(MyType(j));
+ }
+ MyType::constructor_calls = 0;
+ MyType::destructor_calls = 0;
+ {
+ absl::chunked_queue<MyType> q;
+ for (int j = 0; j < i; j++) {
+ q.push_back(vals[j]);
+ }
+ if (i % 10 == 0) {
+ q.clear();
+ } else {
+ for (int j = 0; j < i; j++) {
+ EXPECT_EQ(q.front().val, j);
+ q.pop_front();
+ }
+ }
+ }
+ EXPECT_EQ(MyType::constructor_calls, i);
+ EXPECT_EQ(MyType::destructor_calls, i);
+ }
+}
+
+TEST(ChunkedQueue, MoveObjects) {
+ absl::chunked_queue<std::unique_ptr<int>> q;
+ q.push_back(std::make_unique<int>(10));
+ q.push_back(std::make_unique<int>(11));
+
+ EXPECT_EQ(10, *q.front());
+ q.pop_front();
+ EXPECT_EQ(11, *q.front());
+ q.pop_front();
+}
+
+TEST(ChunkedQueue, EmplaceBack1) {
+ absl::chunked_queue<std::pair<int, int>> q;
+ auto& v = q.emplace_back(1, 2);
+ EXPECT_THAT(v, Pair(1, 2));
+ EXPECT_THAT(q.front(), Pair(1, 2));
+ EXPECT_EQ(&v, &q.back());
+}
+
+TEST(ChunkedQueue, EmplaceBack2) {
+ absl::chunked_queue<std::pair<std::unique_ptr<int>, std::string>> q;
+ auto& v = q.emplace_back(std::make_unique<int>(11), "val12");
+ EXPECT_THAT(v, Pair(Pointee(11), "val12"));
+ EXPECT_THAT(q.front(), Pair(Pointee(11), "val12"));
+}
+
+TEST(ChunkedQueue, OveralignmentEmplaceBack) {
+ struct alignas(64) Overaligned {
+ int x;
+ int y;
+ };
+ absl::chunked_queue<Overaligned, 1, 8> q;
+ for (int i = 0; i < 10; ++i) {
+ auto& v = q.emplace_back(Overaligned{i, i});
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&v) % 64, 0);
+ }
+}
+
+TEST(ChunkedQueue, StatelessAllocatorDoesntAffectObjectSizes) {
+ // When a stateless allocator type is used -- such as when no explicit
+ // allocator type is given, and the stateless default is used -- it does not
+ // increase the object sizes from what they used to be before allocator
+ // support was added. (In practice this verifies that allocator support makes
+ // use of the empty base-class optimization.)
+ //
+ // These "Mock*" structs model the data members of absl::chunked_queue<> and
+ // its internal ChunkedQueueBlock<> type, without any extra storage for
+ // allocator state. (We use these to generate expected stateless-allocator
+ // object sizes in a portable way.)
+ struct MockQueue {
+ struct MockIterator {
+ void* block;
+ void* ptr;
+ void* limit;
+ };
+ MockIterator head;
+ MockIterator tail;
+ size_t size;
+ };
+ struct MockBlock {
+ void* next;
+ void* limit;
+ };
+ using TestQueueType = absl::chunked_queue<int64_t, 1, 16>;
+ EXPECT_EQ(sizeof(TestQueueType), sizeof(MockQueue));
+ EXPECT_EQ(sizeof(absl::container_internal::ChunkedQueueBlock<
+ TestQueueType::value_type, TestQueueType::allocator_type>),
+ sizeof(MockBlock));
+}
+
+TEST(ChunkedQueue, DoesNotRoundBlockSizesUpWithNonDefaultAllocator) {
+ using OneByte = uint8_t;
+ using CustomAllocator = absl::container_internal::CountingAllocator<OneByte>;
+ using Block =
+ absl::container_internal::ChunkedQueueBlock<OneByte, CustomAllocator>;
+ int64_t allocator_live_bytes = 0;
+ CustomAllocator allocator(&allocator_live_bytes);
+  // Create a Block big enough to accommodate at least 1 OneByte.
+ Block* b = Block::New(1, &allocator);
+ ASSERT_TRUE(b != nullptr);
+ // With a non-default allocator in play, the resulting block should have
+ // capacity for exactly 1 element -- the implementation should not round the
+ // allocation size up, which may be inappropriate for non-default allocators.
+ //
+ // (Note that we don't always round up even with the default allocator in use,
+ // e.g. when compiling for ASAN analysis.)
+ EXPECT_EQ(b->size(), 1);
+ Block::Delete(b, &allocator);
+}
+
+TEST(ChunkedQueue, Hardening) {
+ bool hardened = false;
+ ABSL_HARDENING_ASSERT([&hardened]() {
+ hardened = true;
+ return true;
+ }());
+ if (!hardened) {
+ GTEST_SKIP() << "Not a hardened build";
+ }
+
+ absl::chunked_queue<int> q;
+ EXPECT_DEATH_IF_SUPPORTED(q.front(), "");
+ EXPECT_DEATH_IF_SUPPORTED(q.back(), "");
+ EXPECT_DEATH_IF_SUPPORTED(q.pop_front(), "");
+
+ const absl::chunked_queue<int> cq;
+ EXPECT_DEATH_IF_SUPPORTED(cq.front(), "");
+ EXPECT_DEATH_IF_SUPPORTED(cq.back(), "");
+}
+
+} // namespace
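Stepping back from the individual tests, the surface they exercise is a small FIFO API. A minimal usage sketch, assuming only the absl/container/chunked_queue.h header added in this roll (DrainSum is an illustrative name):

#include <cstdint>

#include "absl/container/chunked_queue.h"

int64_t DrainSum() {
  absl::chunked_queue<int64_t> q = {1, 2, 3};  // initializer_list construction
  q.push_back(4);                              // grows at the back
  int64_t total = 0;
  while (!q.empty()) {
    total += q.front();  // inspect the front
    q.pop_front();       // removal happens at the front only
  }
  return total;  // 10
}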
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index b08735f..d47b0e4 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -84,11 +84,9 @@
static constexpr size_t kInlineBytesDefault = 256;
using AllocatorTraits = std::allocator_traits<A>;
- // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
- // but this seems to be mostly pedantic.
template <typename Iterator>
- using EnableIfForwardIterator = std::enable_if_t<
- base_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ using EnableIfInputIterator =
+ std::enable_if_t<base_internal::IsAtLeastInputIterator<Iterator>::value>;
static constexpr bool NoexceptCopyable() {
return std::is_nothrow_copy_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
@@ -161,8 +159,8 @@
// Creates an array initialized with the elements from the input
// range. The array's size will always be `std::distance(first, last)`.
- // REQUIRES: Iterator must be a forward_iterator or better.
- template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
+  // REQUIRES: Iterator must be an input_iterator or better.
+ template <typename Iterator, EnableIfInputIterator<Iterator>* = nullptr>
FixedArray(Iterator first, Iterator last,
const allocator_type& a = allocator_type())
: storage_(std::distance(first, last), a) {
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index f4f41d0..e1649e3 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -672,27 +672,36 @@
std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
+ if (!ret.second) {
+ // NOLINTNEXTLINE(bugprone-use-after-move)
+ ret.first->second = std::forward<M>(obj);
+ }
return ret;
}
template <class K, class M>
iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
+ if (!ret.second) {
+ // NOLINTNEXTLINE(bugprone-use-after-move)
+ ret.first->second = std::forward<M>(obj);
+ }
return ret.first;
}
template <class K, class... Args>
std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
return this->tree_.insert_unique(
+ // NOLINTNEXTLINE(bugprone-use-after-move)
k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
}
template <class K, class... Args>
iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
return this->tree_
- .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ .insert_hint_unique(iterator(hint),
+ // NOLINTNEXTLINE(bugprone-use-after-move)
+ k, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
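The bugprone-use-after-move NOLINTs added above (and in container_memory.h and linked_hash_map.h further down) all cover the same pattern: the key/value is only moved from when an insertion actually happens, so the read on the not-inserted path is safe, which clang-tidy cannot prove. An illustrative reduction using std::map (InsertOrAssign is a hypothetical helper, not from the diff):

#include <map>
#include <utility>

// try_emplace guarantees `obj` is left untouched when no insertion takes
// place, so the assignment on the !inserted branch does not read a
// moved-from value.
template <class Map, class K, class M>
std::pair<typename Map::iterator, bool> InsertOrAssign(Map& m, K&& k,
                                                       M&& obj) {
  auto ret = m.try_emplace(std::forward<K>(k), std::forward<M>(obj));
  if (!ret.second) {
    // clang-tidy flags this as a use-after-move of `obj`, but the move only
    // occurred if the insertion succeeded, which it did not on this branch.
    ret.first->second = std::forward<M>(obj);
  }
  return ret;
}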
diff --git a/absl/container/internal/chunked_queue.h b/absl/container/internal/chunked_queue.h
new file mode 100644
index 0000000..c3718ac
--- /dev/null
+++ b/absl/container/internal/chunked_queue.h
@@ -0,0 +1,173 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_
+#define ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/layout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// ChunkedQueueBlock defines a node in a forward list of blocks of
+// uninitialized storage for T's. The user is responsible for constructing
+// and destroying T's in that storage.
+//
+// ChunkedQueueBlock::New(size_hint, alloc) returns such a node, with at
+// least size_hint T's of uninitialized storage.
+template <typename T, typename Allocator>
+class ChunkedQueueBlock {
+ private:
+ using ChunkedQueueBlockAllocator = typename std::allocator_traits<
+ Allocator>::template rebind_alloc<ChunkedQueueBlock>;
+ using ByteAllocator =
+ typename std::allocator_traits<Allocator>::template rebind_alloc<char>;
+
+ public:
+  // NB: instances of this must not be created or destroyed directly, only via
+ // the New() and Delete() methods. (This notionally-private constructor is
+ // public only to allow access from allocator types used by New().)
+ explicit ChunkedQueueBlock(size_t size)
+ : next_(nullptr), limit_(start() + size) {}
+
+ // Must be deleted by ChunkedQueueBlock::Delete.
+ static ChunkedQueueBlock* New(size_t size_hint, Allocator* alloc) { // NOLINT
+ ABSL_ASSERT(size_hint >= size_t{1});
+ size_t allocation_bytes = AllocSize(size_hint);
+ void* mem;
+ std::tie(mem, allocation_bytes) = Allocate(allocation_bytes, alloc);
+ const size_t element_count =
+ (allocation_bytes - start_offset()) / sizeof(T);
+ ChunkedQueueBlock* as_block = static_cast<ChunkedQueueBlock*>(mem);
+ ChunkedQueueBlockAllocator block_alloc(*alloc);
+ std::allocator_traits<ChunkedQueueBlockAllocator>::construct(
+ block_alloc, as_block, element_count);
+ return as_block;
+ }
+
+ static void Delete(ChunkedQueueBlock* ptr, Allocator* alloc) {
+ const size_t allocation_bytes = AllocSize(ptr->size());
+ ChunkedQueueBlockAllocator block_alloc(*alloc);
+ std::allocator_traits<ChunkedQueueBlockAllocator>::destroy(block_alloc,
+ ptr);
+ if constexpr (std::is_same_v<ByteAllocator, std::allocator<char>>) {
+#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__
+ if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+ ::operator delete(ptr
+#ifdef __cpp_sized_deallocation
+ ,
+ allocation_bytes
+#endif
+ ,
+ std::align_val_t(alignment()));
+ return;
+ }
+#endif
+ ::operator delete(ptr);
+ } else {
+ void* mem = ptr;
+ ByteAllocator byte_alloc(*alloc);
+ std::allocator_traits<ByteAllocator>::deallocate(
+ byte_alloc, static_cast<char*>(mem), allocation_bytes);
+ }
+ }
+
+ ChunkedQueueBlock* next() const { return next_; }
+ void set_next(ChunkedQueueBlock* next) { next_ = next; }
+ T* start() {
+ return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(this) +
+ start_offset());
+ }
+ T* limit() { return limit_; }
+ size_t size() { return limit() - start(); }
+
+ static constexpr size_t block_size_from_bytes(size_t bytes) {
+ return bytes <= static_cast<size_t>(start_offset())
+ ? size_t{1}
+ : elements_in_bytes(bytes - start_offset());
+ }
+
+ private:
+ ChunkedQueueBlock(const ChunkedQueueBlock&) = delete;
+ ChunkedQueueBlock& operator=(const ChunkedQueueBlock&) = delete;
+
+ // The byte size to allocate to ensure space for `min_element_count` elements.
+ static constexpr size_t AllocSize(size_t min_element_count) {
+ return absl::container_internal::Layout<ChunkedQueueBlock, T>(
+ 1, min_element_count)
+ .AllocSize();
+ }
+
+ static constexpr ptrdiff_t start_offset() {
+ return absl::container_internal::Layout<ChunkedQueueBlock, T>(1, 1)
+ .template Offset<1>();
+ }
+
+ static constexpr size_t alignment() {
+ return absl::container_internal::Layout<ChunkedQueueBlock, T>(1, 1)
+ .Alignment();
+ }
+
+ static constexpr size_t elements_in_bytes(size_t bytes) {
+ return (bytes + sizeof(T) - 1) / sizeof(T);
+ }
+
+ static std::pair<void*, size_t> Allocate(size_t allocation_bytes,
+ Allocator* alloc) {
+ // If we're using the default allocator, then we can use new.
+ void* mem;
+ if constexpr (std::is_same_v<ByteAllocator, std::allocator<char>>) {
+      // Older GCC versions emit an unused-variable warning for `alloc`
+      // inside this constexpr branch.
+ static_cast<void>(alloc);
+#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__
+ if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+ // Align the allocation to respect alignof(T).
+ mem = ::operator new(allocation_bytes, std::align_val_t(alignment()));
+ return {mem, allocation_bytes};
+ }
+#endif
+ mem = ::operator new(allocation_bytes);
+ } else {
+ ByteAllocator byte_alloc(*alloc);
+ mem = std::allocator_traits<ByteAllocator>::allocate(byte_alloc,
+ allocation_bytes);
+ }
+ return {mem, allocation_bytes};
+ }
+
+ ChunkedQueueBlock* next_;
+ T* limit_;
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_
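A worked example of the sizing helpers above (the concrete start_offset() comes from Layout; the numbers here are illustrative):

// Suppose sizeof(T) == 4 and start_offset() == 16.
//   block_size_from_bytes(8)  -> 8 <= 16, so the minimum of 1 element.
//   block_size_from_bytes(24) -> elements_in_bytes(24 - 16)
//                              = (8 + 4 - 1) / 4 = 2 elements.
// New(size_hint, alloc) separately rounds the allocation itself, so size()
// of the returned block may exceed the hint, as the BlockSizeRounding tests
// in chunked_queue_test.cc verify.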
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index 608a865..47064a7 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -135,6 +135,7 @@
template <class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
-> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+ // NOLINTNEXTLINE(bugprone-use-after-move)
return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}
diff --git a/absl/container/internal/raw_hash_set_probe_benchmark.cc b/absl/container/internal/raw_hash_set_probe_benchmark.cc
index a09f7d9..7165558 100644
--- a/absl/container/internal/raw_hash_set_probe_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_probe_benchmark.cc
@@ -556,9 +556,11 @@
   // Check the regex again. We might have enabled only one of the
// stats for the benchmark.
if (!CanRunBenchmark(name)) return;
+  // Report at least 1, because benchy drops zero-valued results.
+ double reported_value = std::max(1e9 * result.ratios.*val, 1.0);
absl::PrintF(" %s{\n", comma);
- absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val);
- absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val);
+ absl::PrintF(" \"cpu_time\": %f,\n", reported_value);
+ absl::PrintF(" \"real_time\": %f,\n", reported_value);
absl::PrintF(" \"iterations\": 1,\n");
absl::PrintF(" \"name\": \"%s\",\n", name);
absl::PrintF(" \"time_unit\": \"ns\"\n");
diff --git a/absl/container/linked_hash_map.h b/absl/container/linked_hash_map.h
index cec0ada..8348011 100644
--- a/absl/container/linked_hash_map.h
+++ b/absl/container/linked_hash_map.h
@@ -628,7 +628,10 @@
std::pair<iterator, bool> InsertOrAssignInternal(K&& k, V&& v) {
auto [it, inserted] =
LazyEmplaceInternal(std::forward<K>(k), std::forward<V>(v));
- if (!inserted) it->second = std::forward<V>(v);
+ if (!inserted) {
+ // NOLINTNEXTLINE(bugprone-use-after-move)
+ it->second = std::forward<V>(v);
+ }
return {it, inserted};
}
diff --git a/absl/debugging/internal/examine_stack.cc b/absl/debugging/internal/examine_stack.cc
index 3dd6ba1..cc24ebd 100644
--- a/absl/debugging/internal/examine_stack.cc
+++ b/absl/debugging/internal/examine_stack.cc
@@ -149,6 +149,10 @@
debug_stack_trace_hook = hook;
}
+SymbolizeUrlEmitterLegacy GetDebugStackTraceHookLegacy() {
+ return debug_stack_trace_hook;
+}
+
SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; }
// Returns the program counter from signal context, nullptr if
diff --git a/absl/debugging/internal/examine_stack.h b/absl/debugging/internal/examine_stack.h
index 190af87..2094d62 100644
--- a/absl/debugging/internal/examine_stack.h
+++ b/absl/debugging/internal/examine_stack.h
@@ -32,12 +32,19 @@
// `hook` may be called from a signal handler.
typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
OutputWriter* writer, void* writer_arg);
+typedef void (*SymbolizeUrlEmitterLegacy)(void* const stack[], int depth,
+ OutputWriter* writer,
+ void* writer_arg);
// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
// This is inherently unsafe and must be signal safe code.
void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
SymbolizeUrlEmitter GetDebugStackTraceHook();
+// Currently an exact copy of the above. Needed for the 3-CL dance due to
+// TCMallocDebugStackTraceHook's dependency on this API.
+SymbolizeUrlEmitterLegacy GetDebugStackTraceHookLegacy();
+
// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index 02df7fa..37bd39d 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -74,6 +74,7 @@
#include <vector>
#include "absl/base/attributes.h"
+#include "absl/base/internal/endian.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
@@ -93,6 +94,38 @@
#include <filesystem> // NOLINT
#endif
+// 32-bit builds with SSE 4.2 do not have _mm_crc32_u64, so the
+// __x86_64__ condition is necessary.
+#if defined(__SSE4_2__) && defined(__x86_64__)
+
+#include <x86intrin.h>
+#define ABSL_HASH_INTERNAL_HAS_CRC32
+#define ABSL_HASH_INTERNAL_CRC32_U64 _mm_crc32_u64
+#define ABSL_HASH_INTERNAL_CRC32_U32 _mm_crc32_u32
+#define ABSL_HASH_INTERNAL_CRC32_U8 _mm_crc32_u8
+
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
+
+// MSVC AVX (/arch:AVX) implies SSE 4.2.
+#include <intrin.h>
+#define ABSL_HASH_INTERNAL_HAS_CRC32
+#define ABSL_HASH_INTERNAL_CRC32_U64 _mm_crc32_u64
+#define ABSL_HASH_INTERNAL_CRC32_U32 _mm_crc32_u32
+#define ABSL_HASH_INTERNAL_CRC32_U8 _mm_crc32_u8
+
+#elif defined(__ARM_FEATURE_CRC32)
+
+#include <arm_acle.h>
+#define ABSL_HASH_INTERNAL_HAS_CRC32
+// The crc argument is cast to uint32_t because the macro interface matches
+// the x86 intrinsic (_mm_crc32_u64 accepts crc as a 64-bit integer), while
+// __crc32cd takes a 32-bit crc.
+#define ABSL_HASH_INTERNAL_CRC32_U64(crc, data) \
+ __crc32cd(static_cast<uint32_t>(crc), data)
+#define ABSL_HASH_INTERNAL_CRC32_U32 __crc32cw
+#define ABSL_HASH_INTERNAL_CRC32_U8 __crc32cb
+
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -965,18 +998,20 @@
return Uint128High64(m) ^ Uint128Low64(m);
}
-// Reads 8 bytes from p.
-inline uint64_t Read8(const unsigned char* p) {
// Suppress erroneous array bounds errors on GCC.
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
+inline uint32_t Read4(const unsigned char* p) {
+ return absl::base_internal::UnalignedLoad32(p);
+}
+inline uint64_t Read8(const unsigned char* p) {
return absl::base_internal::UnalignedLoad64(p);
+}
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
-}
// Reads 9 to 16 bytes from p.
// The first 8 bytes are in .first, and the rest of the bytes are in .second
@@ -1096,6 +1131,70 @@
return CombineLargeContiguousImplOn32BitLengthGt8(state, first, len);
}
+#ifdef ABSL_HASH_INTERNAL_HAS_CRC32
+inline uint64_t CombineContiguousImpl(
+ uint64_t state, const unsigned char* first, size_t len,
+ std::integral_constant<int, 8> /* sizeof_size_t */) {
+ if (ABSL_PREDICT_FALSE(len > 32)) {
+ return CombineLargeContiguousImplOn64BitLengthGt32(state, first, len);
+ }
+  // `mul` is the salt used for final mixing. It is important to fill its
+  // high 32 bits, because CRC wipes out the high 32 bits of its input; the
+  // `rotr` mixes `len` into those high bits.
+ uint64_t mul = absl::rotr(kMul, static_cast<int>(len));
+ // Only low 32 bits of each uint64_t are used in CRC32 so we use gbswap_64 to
+ // move high 32 bits to low 32 bits. It has slightly smaller binary size than
+ // `>> 32`. `state + 8 * len` is a single instruction on both x86 and ARM, so
+ // we use it to better mix length. Although only the low 32 bits of the pair
+ // elements are used, we use pair<uint64_t, uint64_t> for better generated
+ // code.
+ std::pair<uint64_t, uint64_t> crcs = {state + 8 * len,
+ absl::gbswap_64(state)};
+
+ // All CRC operations here directly read bytes from the memory.
+ // Single fused instructions are used, like `crc32 rcx, qword ptr [rsi]`.
+ // On x86, llvm-mca reports latency `R + 2` for such fused instructions, while
+ // `R + 3` for two separate `mov` + `crc` instructions. `R` is the latency of
+ // reading the memory. Fused instructions also reduce register pressure
+ // allowing surrounding code to be more efficient when this code is inlined.
+ if (len > 8) {
+ crcs = {ABSL_HASH_INTERNAL_CRC32_U64(crcs.first, Read8(first)),
+ ABSL_HASH_INTERNAL_CRC32_U64(crcs.second, Read8(first + len - 8))};
+ if (len > 16) {
+ // We compute the second round of dependent CRC32 operations.
+ crcs = {ABSL_HASH_INTERNAL_CRC32_U64(crcs.first, Read8(first + len - 16)),
+ ABSL_HASH_INTERNAL_CRC32_U64(crcs.second, Read8(first + 8))};
+ }
+ } else {
+ if (len >= 4) {
+ // We use CRC for 4 bytes to benefit from the fused instruction and better
+ // hash quality.
+ // Using `xor` or `add` may reduce latency for this case, but would
+ // require more registers, more instructions and will have worse hash
+ // quality.
+ crcs = {ABSL_HASH_INTERNAL_CRC32_U32(static_cast<uint32_t>(crcs.first),
+ Read4(first)),
+ ABSL_HASH_INTERNAL_CRC32_U32(static_cast<uint32_t>(crcs.second),
+ Read4(first + len - 4))};
+ } else if (len >= 1) {
+ // We mix three bytes all into different output registers.
+ // This way, we do not need shifting of these bytes (so they don't overlap
+ // with each other).
+ crcs = {ABSL_HASH_INTERNAL_CRC32_U8(static_cast<uint32_t>(crcs.first),
+ first[0]),
+ ABSL_HASH_INTERNAL_CRC32_U8(static_cast<uint32_t>(crcs.second),
+ first[len - 1])};
+      // The middle byte is mixed in more weakly; it is a distinct byte only
+      // for len == 3. This mixing is independent of the CRC operations, so
+      // it can be scheduled ASAP.
+ mul += first[len / 2];
+ }
+ }
+ // `mul` is mixed into both sides of `Mix` to guarantee non-zero values for
+ // both multiplicands. Using Mix instead of just multiplication here improves
+ // hash quality, especially for short strings.
+ return Mix(mul - crcs.first, crcs.second - mul);
+}
+#else
inline uint64_t CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 8> /* sizeof_size_t */) {
@@ -1118,6 +1217,7 @@
// to calling CombineLargeContiguousImpl once with 2 * PiecewiseChunkSize().
return CombineLargeContiguousImplOn64BitLengthGt32(state, first, len);
}
+#endif // ABSL_HASH_INTERNAL_HAS_CRC32
#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
@@ -1452,4 +1552,9 @@
ABSL_NAMESPACE_END
} // namespace absl
+#undef ABSL_HASH_INTERNAL_HAS_CRC32
+#undef ABSL_HASH_INTERNAL_CRC32_U64
+#undef ABSL_HASH_INTERNAL_CRC32_U32
+#undef ABSL_HASH_INTERNAL_CRC32_U8
+
#endif // ABSL_HASH_INTERNAL_HASH_H_
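The heart of the new CRC fast path for 8 < len <= 16 bytes is two overlapping unaligned reads fed into independent CRC chains. A simplified standalone sketch, assuming x86-64 with SSE4.2 (the seeds and final mix below are illustrative stand-ins, not the exact constants in hash.h):

#include <cstddef>
#include <cstdint>
#include <cstring>

#include <nmmintrin.h>  // SSE4.2: _mm_crc32_u64

inline uint64_t CrcHash9To16(const unsigned char* p, size_t len,
                             uint64_t state) {
  uint64_t a, b;
  std::memcpy(&a, p, 8);            // first 8 bytes
  std::memcpy(&b, p + len - 8, 8);  // last 8 bytes; overlaps `a` if len < 16
  // Two independent CRC chains, seeded so both depend on state and len.
  uint64_t c0 = _mm_crc32_u64(state + 8 * len, a);
  uint64_t c1 = _mm_crc32_u64(state >> 32, b);
  return c0 ^ (c1 * 0x9ddfea08eb382d69ULL);  // illustrative final mix
}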
diff --git a/absl/strings/internal/str_format/convert_test.cc b/absl/strings/internal/str_format/convert_test.cc
index ea0329c..f340df4 100644
--- a/absl/strings/internal/str_format/convert_test.cc
+++ b/absl/strings/internal/str_format/convert_test.cc
@@ -284,6 +284,11 @@
return native_traits;
}
+bool IsNativeHexFloatConversion(char f) { return f == 'a' || f == 'A'; }
+bool IsNativeFloatConversion(char f) {
+ return f == 'f' || f == 'F' || f == 'e' || f == 'E' || f == 'a' || f == 'A';
+}
+
class FormatConvertTest : public ::testing::Test { };
template <typename T>
@@ -799,20 +804,19 @@
'e', 'E'}) {
std::string fmt_str = std::string(fmt) + f;
- if (fmt == absl::string_view("%.5000") && f != 'f' && f != 'F' &&
- f != 'a' && f != 'A') {
+ if (fmt == absl::string_view("%.5000") && !IsNativeFloatConversion(f)) {
// This particular test takes way too long with snprintf.
// Disable for the case we are not implementing natively.
continue;
}
- if ((f == 'a' || f == 'A') &&
+ if (IsNativeHexFloatConversion(f) &&
!native_traits.hex_float_has_glibc_rounding) {
continue;
}
if (!native_traits.hex_float_prefers_denormal_repr &&
- (f == 'a' || f == 'A') &&
+ IsNativeHexFloatConversion(f) &&
std::fpclassify(tested_float) == FP_SUBNORMAL) {
continue;
}
@@ -1328,14 +1332,13 @@
'e', 'E'}) {
std::string fmt_str = std::string(fmt) + 'L' + f;
- if (fmt == absl::string_view("%.5000") && f != 'f' && f != 'F' &&
- f != 'a' && f != 'A') {
+ if (fmt == absl::string_view("%.5000") && !IsNativeFloatConversion(f)) {
// This particular test takes way too long with snprintf.
// Disable for the case we are not implementing natively.
continue;
}
- if (f == 'a' || f == 'A') {
+ if (IsNativeHexFloatConversion(f)) {
if (!native_traits.hex_float_has_glibc_rounding ||
!native_traits.hex_float_optimizes_leading_digit_bit_count) {
continue;
diff --git a/absl/strings/internal/str_format/float_conversion.cc b/absl/strings/internal/str_format/float_conversion.cc
index aa31998..0168662 100644
--- a/absl/strings/internal/str_format/float_conversion.cc
+++ b/absl/strings/internal/str_format/float_conversion.cc
@@ -20,7 +20,10 @@
#include <array>
#include <cassert>
#include <cmath>
+#include <cstdint>
+#include <cstring>
#include <limits>
+#include <optional>
#include <string>
#include "absl/base/attributes.h"
@@ -31,7 +34,9 @@
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/numeric/internal/representation.h"
+#include "absl/strings/internal/str_format/extension.h"
#include "absl/strings/numbers.h"
+#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
@@ -451,6 +456,71 @@
return p;
}
+struct FractionalDigitPrinterResult {
+ char* end;
+ size_t skipped_zeros;
+ bool nonzero_remainder;
+};
+
+FractionalDigitPrinterResult PrintFractionalDigitsScientific(
+ uint64_t v, char* start, int exp, size_t precision, bool skip_zeros) {
+ char* p = start;
+ v <<= (64 - exp);
+
+ size_t skipped_zeros = 0;
+ while (v != 0 && precision > 0) {
+ char carry = MultiplyBy10WithCarry(&v, 0);
+ if (skip_zeros) {
+ if (carry == 0) {
+ ++skipped_zeros;
+ continue;
+ }
+ skip_zeros = false;
+ }
+ *p++ = carry + '0';
+ --precision;
+ }
+ return {p, skipped_zeros, v != 0};
+}
+
+FractionalDigitPrinterResult PrintFractionalDigitsScientific(
+ uint128 v, char* start, int exp, size_t precision, bool skip_zeros) {
+ char* p = start;
+ v <<= (128 - exp);
+ auto high = static_cast<uint64_t>(v >> 64);
+ auto low = static_cast<uint64_t>(v);
+
+ size_t skipped_zeros = 0;
+ while (precision > 0 && low != 0) {
+ char carry = MultiplyBy10WithCarry(&low, 0);
+ carry = MultiplyBy10WithCarry(&high, carry);
+ if (skip_zeros) {
+ if (carry == 0) {
+ ++skipped_zeros;
+ continue;
+ }
+ skip_zeros = false;
+ }
+ *p++ = carry + '0';
+ --precision;
+ }
+
+ while (precision > 0 && high != 0) {
+ char carry = MultiplyBy10WithCarry(&high, 0);
+ if (skip_zeros) {
+ if (carry == 0) {
+ ++skipped_zeros;
+ continue;
+ }
+ skip_zeros = false;
+ }
+ *p++ = carry + '0';
+ --precision;
+ }
+
+ return {p, skipped_zeros, high != 0 || low != 0};
+}
+
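Both PrintFractionalDigitsScientific overloads above rely on the caller-side normalization (v <<= 64 - exp, or 128 - exp): after the shift the fraction occupies the high bits, so each MultiplyBy10WithCarry spills exactly one decimal digit into the carry. A small worked trace (values illustrative):

// exp = 2, v = 3, i.e. the fraction 3 / 2^2 = 0.75:
//   v <<= (64 - 2)  -> the top two bits hold 0.11 (binary)
//   x10: carry = 7, remainder 0.5  (0.75 * 10 = 7.5)
//   x10: carry = 5, remainder 0    (0.5  * 10 = 5.0)
// Digits emitted: "75"; nonzero_remainder == false.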
struct FormatState {
char sign_char;
size_t precision;
@@ -1333,6 +1403,427 @@
sink->Append(right_spaces, ' ');
}
+template <typename Int>
+void FormatE(Int mantissa, int exp, bool uppercase, const FormatState& state) {
+ if (exp > 0) {
+ const int total_bits =
+ static_cast<int>(sizeof(Int) * 8) - LeadingZeros(mantissa) + exp;
+ if (total_bits > 128) {
+ FormatEPositiveExpSlow(mantissa, exp, uppercase, state);
+ return;
+ }
+ } else {
+ if (ABSL_PREDICT_FALSE(exp < -128)) {
+ FormatENegativeExpSlow(mantissa, exp, uppercase, state);
+ return;
+ }
+ }
+ FormatEFast(mantissa, exp, uppercase, state);
+}
+
+// The value is guaranteed to fit into 128 bits at this point.
+template <typename Int>
+void FormatEFast(Int v, int exp, bool uppercase, const FormatState& state) {
+ if (!v) {
+ absl::string_view mantissa_str = state.ShouldPrintDot() ? "0." : "0";
+ FinalPrint(state, mantissa_str, 0, state.precision,
+ uppercase ? "E+00" : "e+00");
+ return;
+ }
+ constexpr int kInputBits = sizeof(Int) * 8;
+ constexpr int kMaxFractionalDigits = 128;
+ constexpr int kBufferSize = 2 + // '.' + rounding
+ kMaxFixedPrecision + // Integral
+ kMaxFractionalDigits; // Fractional
+ const int total_bits = kInputBits - LeadingZeros(v) + exp;
+ char buffer[kBufferSize];
+ char* integral_start = buffer + 2;
+ char* integral_end = buffer + 2 + kMaxFixedPrecision;
+ char* final_start;
+ char* final_end;
+ bool zero_integral = false;
+ int scientific_exp = 0;
+ size_t digits_printed = 0;
+ size_t trailing_zeros = 0;
+ bool has_more_non_zero = false;
+
+ auto check_integral_zeros =
+ [](char* const begin, char* const end,
+ const size_t precision, size_t digits_processed) -> bool {
+    // When considering rounding to even, we care about the digits after the
+    // round digit, which means the total number of digits from the start is
+    // precision + 2, since the first digit we print (the one before the
+    // decimal point) is not part of the precision.
+ size_t digit_upper_bound = precision + 2;
+ if (digits_processed > digit_upper_bound) {
+ return std::any_of(begin + digit_upper_bound, end,
+ [](char c) { return c != '0'; });
+ }
+ return false;
+ };
+
+ if (exp >= 0) {
+ integral_end = total_bits <= 64 ? numbers_internal::FastIntToBuffer(
+ static_cast<uint64_t>(v) << exp, integral_start)
+ : numbers_internal::FastIntToBuffer(
+ static_cast<uint128>(v) << exp, integral_start);
+ *integral_end = '0';
+ final_start = integral_start;
+ // Integral is guaranteed to be non-zero at this point.
+ scientific_exp = static_cast<int>(integral_end - integral_start) - 1;
+ digits_printed = static_cast<size_t>(integral_end - integral_start);
+ final_end = integral_end;
+ has_more_non_zero = check_integral_zeros(integral_start, integral_end,
+ state.precision, digits_printed);
+ } else {
+ exp = -exp;
+ if (exp < kInputBits) {
+ integral_end =
+ numbers_internal::FastIntToBuffer(v >> exp, integral_start);
+ }
+ *integral_end = '0';
+    // We didn't move integral_start; the zero_integral check below handles
+    // the case where the integral part is empty.
+ zero_integral = exp >= kInputBits || v >> exp == 0;
+ if (!zero_integral) {
+ digits_printed = static_cast<size_t>(integral_end - integral_start);
+ has_more_non_zero = check_integral_zeros(integral_start, integral_end,
+ state.precision, digits_printed);
+ final_end = integral_end;
+ }
+ // Print fractional digits
+ char* fractional_start = integral_end;
+
+ size_t digits_to_print = (state.precision + 1) >= digits_printed
+ ? state.precision + 1 - digits_printed
+ : 0;
+ bool print_extra = digits_printed <= state.precision + 1;
+ auto [fractional_end, skipped_zeros, has_nonzero_rem] =
+ exp <= 64 ? PrintFractionalDigitsScientific(
+ v, fractional_start, exp, digits_to_print + print_extra,
+ zero_integral)
+ : PrintFractionalDigitsScientific(
+ static_cast<uint128>(v), fractional_start, exp,
+ digits_to_print + print_extra, zero_integral);
+ final_end = fractional_end;
+ *fractional_end = '0';
+ has_more_non_zero |= has_nonzero_rem;
+ digits_printed += static_cast<size_t>(fractional_end - fractional_start);
+ if (zero_integral) {
+ scientific_exp = -1 * static_cast<int>(skipped_zeros + 1);
+ } else {
+ scientific_exp = static_cast<int>(integral_end - integral_start) - 1;
+ }
+    // Don't do any rounding here; we do it ourselves below.
+ final_start = zero_integral ? fractional_start : integral_start;
+ }
+
+  // Round to the requested precision.
+ if (digits_printed >= state.precision + 1) {
+ final_start[-1] = '0';
+ char* round_digit_ptr = final_start + 1 + state.precision;
+ if (*round_digit_ptr > '5') {
+ RoundUp(round_digit_ptr - 1);
+ } else if (*round_digit_ptr == '5') {
+ if (has_more_non_zero) {
+ RoundUp(round_digit_ptr - 1);
+ } else {
+ RoundToEven(round_digit_ptr - 1);
+ }
+ }
+ final_end = round_digit_ptr;
+ if (final_start[-1] == '1') {
+ --final_start;
+ ++scientific_exp;
+ --final_end;
+ }
+ } else {
+ // Need to pad with zeros.
+ trailing_zeros = state.precision - (digits_printed - 1);
+ }
+
+ if (state.precision > 0 || state.ShouldPrintDot()) {
+ final_start[-1] = *final_start;
+ *final_start = '.';
+ --final_start;
+ }
+
+  // We need to add 2 to the buffer size for the +/- sign and the 'e'.
+ constexpr size_t kExpBufferSize = numbers_internal::kFastToBufferSize + 2;
+ char exp_buffer[kExpBufferSize];
+ char* exp_ptr_start = exp_buffer;
+ char* exp_ptr = exp_ptr_start;
+ *exp_ptr++ = uppercase ? 'E' : 'e';
+ if (scientific_exp >= 0) {
+ *exp_ptr++ = '+';
+ } else {
+ *exp_ptr++ = '-';
+ scientific_exp = -scientific_exp;
+ }
+
+ if (scientific_exp < 10) {
+ *exp_ptr++ = '0';
+ }
+ exp_ptr = numbers_internal::FastIntToBuffer(scientific_exp, exp_ptr);
+ FinalPrint(state,
+ absl::string_view(final_start,
+ static_cast<size_t>(final_end - final_start)),
+ 0, trailing_zeros,
+ absl::string_view(exp_ptr_start,
+ static_cast<size_t>(exp_ptr - exp_ptr_start)));
+}
+
+void FormatENegativeExpSlow(uint128 mantissa, int exp, bool uppercase,
+ const FormatState& state) {
+ assert(exp < 0);
+
+ FractionalDigitGenerator::RunConversion(
+ mantissa, -exp,
+ [&](FractionalDigitGenerator digit_gen) {
+ int first_digit = 0;
+ size_t nines = 0;
+ int num_leading_zeros = 0;
+ while (digit_gen.HasMoreDigits()) {
+ auto digits = digit_gen.GetDigits();
+ if (digits.digit_before_nine != 0) {
+ first_digit = digits.digit_before_nine;
+ nines = digits.num_nines;
+ break;
+ } else if (digits.num_nines > 0) {
+ // This also means the first digit is 0
+ first_digit = 9;
+ nines = digits.num_nines - 1;
+ num_leading_zeros++;
+ break;
+ }
+ num_leading_zeros++;
+ }
+
+ bool change_to_zeros = false;
+ if (nines >= state.precision || state.precision == 0) {
+ bool round_up = false;
+ if (nines == state.precision) {
+ round_up = digit_gen.IsGreaterThanHalf();
+ } else {
+ round_up = nines > 0 || digit_gen.IsGreaterThanHalf();
+ }
+ if (round_up) {
+ first_digit = (first_digit == 9 ? 1 : first_digit + 1);
+ num_leading_zeros -= (first_digit == 1);
+ change_to_zeros = true;
+ }
+ }
+ int scientific_exp = -(num_leading_zeros + 1);
+ assert(scientific_exp < 0);
+ char exp_buffer[numbers_internal::kFastToBufferSize];
+ char* exp_start = exp_buffer;
+ *exp_start++ = '-';
+ if (scientific_exp > -10) {
+ *exp_start++ = '0';
+ }
+ scientific_exp *= -1;
+ char* exp_end =
+ numbers_internal::FastIntToBuffer(scientific_exp, exp_start);
+ const size_t total_digits =
+ 1 // First digit
+ + (state.ShouldPrintDot() ? 1 : 0) // Decimal point
+ + state.precision // Digits after decimal
+ + 1 // 'e' or 'E'
+ + static_cast<size_t>(exp_end - exp_buffer); // Exponent digits
+
+ const auto padding = ExtraWidthToPadding(
+ total_digits + (state.sign_char != '\0' ? 1 : 0), state);
+ state.sink->Append(padding.left_spaces, ' ');
+
+ if (state.sign_char != '\0') {
+ state.sink->Append(1, state.sign_char);
+ }
+
+ state.sink->Append(1, static_cast<char>(first_digit + '0'));
+ if (state.ShouldPrintDot()) {
+ state.sink->Append(1, '.');
+ }
+ size_t digits_to_go = state.precision;
+ size_t nines_to_print = std::min(nines, digits_to_go);
+ state.sink->Append(nines_to_print, change_to_zeros ? '0' : '9');
+ digits_to_go -= nines_to_print;
+ while (digits_to_go > 0 && digit_gen.HasMoreDigits()) {
+ auto digits = digit_gen.GetDigits();
+
+ if (digits.num_nines + 1 < digits_to_go) {
+ state.sink->Append(1, digits.digit_before_nine + '0');
+ state.sink->Append(digits.num_nines, '9');
+ digits_to_go -= digits.num_nines + 1;
+ } else {
+ bool round_up = false;
+ if (digits.num_nines + 1 > digits_to_go) {
+ round_up = true;
+ } else if (digit_gen.IsGreaterThanHalf()) {
+ round_up = true;
+ } else if (digit_gen.IsExactlyHalf()) {
+ round_up =
+ digits.num_nines != 0 || digits.digit_before_nine % 2 == 1;
+ }
+ if (round_up) {
+ state.sink->Append(1, digits.digit_before_nine + '1');
+ --digits_to_go;
+ } else {
+ state.sink->Append(1, digits.digit_before_nine + '0');
+ state.sink->Append(digits_to_go - 1, '9');
+ digits_to_go = 0;
+ }
+ break;
+ }
+ }
+ state.sink->Append(digits_to_go, '0');
+ state.sink->Append(1, uppercase ? 'E' : 'e');
+ state.sink->Append(absl::string_view(
+ exp_buffer, static_cast<size_t>(exp_end - exp_buffer)));
+ state.sink->Append(padding.right_spaces, ' ');
+ });
+}
+
+std::optional<int> GetOneDigit(BinaryToDecimal& btd,
+ absl::string_view& digits_view) {
+ if (digits_view.empty()) {
+ if (!btd.AdvanceDigits()) return std::nullopt;
+ digits_view = btd.CurrentDigits();
+ }
+ char d = digits_view.front();
+ digits_view.remove_prefix(1);
+ return d - '0';
+}
+
+struct DigitRun {
+ std::optional<int> digit;
+ size_t nines;
+};
+
+DigitRun GetDigits(BinaryToDecimal& btd, absl::string_view& digits_view) {
+ auto peek_digit = [&]() -> std::optional<int> {
+ if (digits_view.empty()) {
+ if (!btd.AdvanceDigits()) return std::nullopt;
+ digits_view = btd.CurrentDigits();
+ }
+ return digits_view.front() - '0';
+ };
+
+ auto digit_before_nines = GetOneDigit(btd, digits_view);
+ if (!digit_before_nines.has_value()) return {std::nullopt, 0};
+
+ auto next_digit = peek_digit();
+ size_t num_nines = 0;
+ while (next_digit == 9) {
+ // consume the 9
+ GetOneDigit(btd, digits_view);
+ ++num_nines;
+ next_digit = peek_digit();
+ }
+ return digit_before_nines == 9
+ ? DigitRun{std::nullopt, num_nines + 1}
+ : DigitRun{digit_before_nines, num_nines};
+}
+
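The DigitRun grouping above (and the matching nines bookkeeping in FormatENegativeExpSlow) exists because a round-up has to carry through an entire run of nines. A worked illustration (digits hypothetical):

// Formatting 0.2999971 as %.4e:
//   digit runs: {digit_before_nine = 2, num_nines = 4}, then digit 7, ...
//   the nines reach the end of the precision and the next digit is > 4, so
//   the carry propagates through the whole run: 2.9999 -> 3.0000
//   output: "3.0000e-01"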
+void FormatEPositiveExpSlow(uint128 mantissa, int exp, bool uppercase,
+ const FormatState& state) {
+ BinaryToDecimal::RunConversion(
+ mantissa, exp, [&](BinaryToDecimal btd) {
+ int scientific_exp = static_cast<int>(btd.TotalDigits() - 1);
+ absl::string_view digits_view = btd.CurrentDigits();
+
+ size_t digits_to_go = state.precision + 1;
+ auto [first_digit_opt, nines] = GetDigits(btd, digits_view);
+ if (!first_digit_opt.has_value() && nines == 0) {
+ return;
+ }
+
+ int first_digit = first_digit_opt.value_or(9);
+ if (!first_digit_opt) {
+ --nines;
+ }
+
+ // At this point we are guaranteed to have a first digit.
+ bool change_to_zeros = false;
+ if (nines + 1 >= digits_to_go) {
+ // Everything we need to print is in the first DigitRun
+ auto [next_digit_opt, next_nines] = GetDigits(btd, digits_view);
+ if (nines == state.precision) {
+ change_to_zeros = next_digit_opt.value_or(0) > 4;
+ } else {
+ change_to_zeros = true;
+ }
+ if (change_to_zeros) {
+ if (first_digit != 9) {
+ first_digit = first_digit + 1;
+ } else {
+ first_digit = 1;
+ ++scientific_exp;
+ }
+ }
+ }
+
+ char exp_buffer[numbers_internal::kFastToBufferSize];
+ char* exp_buffer_end =
+ numbers_internal::FastIntToBuffer(scientific_exp, exp_buffer);
+ // One leading digit, the dot when printed, `precision` digits, 'e'/'E'
+ // plus the exponent sign, and the exponent zero-padded below to at least
+ // two digits.
+ const size_t total_digits_out =
+ 1 + (state.precision > 0 || state.ShouldPrintDot() ? 1 : 0) +
+ state.precision + 2 +
+ std::max(static_cast<size_t>(exp_buffer_end - exp_buffer), size_t{2});
+
+ const auto padding = ExtraWidthToPadding(
+ total_digits_out + (state.sign_char != '\0' ? 1 : 0), state);
+
+ state.sink->Append(padding.left_spaces, ' ');
+ if (state.sign_char != '\0') {
+ state.sink->Append(1, state.sign_char);
+ }
+ state.sink->Append(1, static_cast<char>(first_digit + '0'));
+ --digits_to_go;
+ if (state.precision > 0 || state.ShouldPrintDot()) {
+ state.sink->Append(1, '.');
+ }
+ state.sink->Append(std::min(digits_to_go, nines),
+ change_to_zeros ? '0' : '9');
+ digits_to_go -= std::min(digits_to_go, nines);
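+ // Emit the remaining digits run-by-run; every run after the first starts
+ // with a non-9 digit (the previous run consumed all trailing 9s), so a
+ // carry stays inside the run.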
+ while (digits_to_go > 0) {
+ auto [digit_opt, curr_nines] = GetDigits(btd, digits_view);
+ if (!digit_opt.has_value()) break;
+ int digit = *digit_opt;
+ if (curr_nines + 1 < digits_to_go) {
+ state.sink->Append(1, static_cast<char>(digit + '0'));
+ state.sink->Append(curr_nines, '9');
+ digits_to_go -= curr_nines + 1;
+ } else {
+ bool need_round_up = false;
+ auto [next_digit_opt, next_nines] = GetDigits(btd, digits_view);
+ if (digits_to_go == 1) {
+ need_round_up = curr_nines > 0 || next_digit_opt.value_or(0) > 4;
+ } else if (digits_to_go == curr_nines + 1) {
+ // The run exactly fills the remaining precision; round up only if the
+ // digit after it is > 4.
+ need_round_up = next_digit_opt.value_or(0) > 4;
+ } else {
+ // A 9 from this run falls just past the last printed digit, so we
+ // always round up.
+ need_round_up = true;
+ }
+ state.sink->Append(1,
+ static_cast<char>(digit + need_round_up + '0'));
+ state.sink->Append(digits_to_go - 1, need_round_up ? '0' : '9');
+ digits_to_go = 0;
+ }
+ }
+
+ if (digits_to_go > 0) {
+ state.sink->Append(digits_to_go, '0');
+ }
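+ // printf-style exponent: explicit sign, zero-padded to at least two
+ // digits. scientific_exp is never negative here, so FastIntToBuffer
+ // emitted digits only and the sign is appended separately.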
+ state.sink->Append(1, uppercase ? 'E' : 'e');
+ state.sink->Append(1, scientific_exp >= 0 ? '+' : '-');
+ if (scientific_exp < 10) {
+ state.sink->Append(1, '0');
+ }
+ state.sink->Append(absl::string_view(
+ exp_buffer, static_cast<size_t>(exp_buffer_end - exp_buffer)));
+ state.sink->Append(padding.right_spaces, ' ');
+ });
+}
+
template <typename Float>
bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv,
FormatSinkImpl *sink) {
@@ -1371,14 +1862,10 @@
return true;
} else if (c == FormatConversionCharInternal::e ||
c == FormatConversionCharInternal::E) {
- if (!FloatToBuffer<FormatStyle::Precision>(decomposed, precision, &buffer,
- &exp)) {
- return FallbackToSnprintf(v, conv, sink);
- }
- if (!conv.has_alt_flag() && buffer.back() == '.') buffer.pop_back();
- PrintExponent(
- exp, FormatConversionCharIsUpper(conv.conversion_char()) ? 'E' : 'e',
- &buffer);
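+ // %e/%E now go through the exact FormatE path above instead of
+ // FloatToBuffer with an snprintf fallback.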
+ FormatE(decomposed.mantissa, decomposed.exponent,
+ FormatConversionCharIsUpper(conv.conversion_char()),
+ {sign_char, precision, conv, sink});
+ return true;
} else if (c == FormatConversionCharInternal::g ||
c == FormatConversionCharInternal::G) {
precision = std::max(precision, size_t{1}) - 1;
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 3af74c2..41e9010 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -608,6 +608,7 @@
// Calls `mu->lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
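+ // ABSL_REFACTOR_INLINE presumably flags these pointer-taking overloads
+ // for tooling-driven migration to the reference overloads (see the
+ // deprecation messages stripped by patches/0005-mutex-deprecations.patch).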
+ ABSL_REFACTOR_INLINE
explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: MutexLock(*mu) {}
@@ -620,6 +621,7 @@
this->mu_.LockWhen(cond);
}
+ ABSL_REFACTOR_INLINE
explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: MutexLock(*mu, cond) {}
@@ -647,6 +649,7 @@
mu.lock_shared();
}
+ ABSL_REFACTOR_INLINE
explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
: ReaderMutexLock(*mu) {}
@@ -656,6 +659,7 @@
mu.ReaderLockWhen(cond);
}
+ ABSL_REFACTOR_INLINE
explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_SHARED_LOCK_FUNCTION(mu)
: ReaderMutexLock(*mu, cond) {}
@@ -683,6 +687,7 @@
mu.lock();
}
+ ABSL_REFACTOR_INLINE
explicit WriterMutexLock(Mutex* absl_nonnull mu)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: WriterMutexLock(*mu) {}
@@ -694,6 +699,7 @@
mu.WriterLockWhen(cond);
}
+ ABSL_REFACTOR_INLINE
explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: WriterMutexLock(*mu, cond) {}
@@ -1097,6 +1103,7 @@
this->mu_->lock();
}
+ ABSL_REFACTOR_INLINE
explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: ReleasableMutexLock(*mu) {}
@@ -1108,6 +1115,7 @@
this->mu_->LockWhen(cond);
}
+ ABSL_REFACTOR_INLINE
explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: ReleasableMutexLock(*mu, cond) {}
diff --git a/ci/linux_arm_clang-latest_libcxx_bazel.sh b/ci/linux_arm_clang-latest_libcxx_bazel.sh
index 631a8bd..5ad9fcc 100755
--- a/ci/linux_arm_clang-latest_libcxx_bazel.sh
+++ b/ci/linux_arm_clang-latest_libcxx_bazel.sh
@@ -88,6 +88,7 @@
--features=external_include_paths \
--keep_going \
--linkopt=-stdlib=libc++ \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_clang-latest_libcxx_asan_bazel.sh b/ci/linux_clang-latest_libcxx_asan_bazel.sh
index cea10ff..1200ef0 100755
--- a/ci/linux_clang-latest_libcxx_asan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_asan_bazel.sh
@@ -93,6 +93,7 @@
--keep_going \
--linkopt=\"-fsanitize=address\" \
--linkopt=\"-fsanitize-link-c++-runtime\" \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"ASAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_clang-latest_libcxx_bazel.sh b/ci/linux_clang-latest_libcxx_bazel.sh
index 5c51d15..74af10c 100755
--- a/ci/linux_clang-latest_libcxx_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_bazel.sh
@@ -89,6 +89,7 @@
--enable_bzlmod=true \
--features=external_include_paths \
--keep_going \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_clang-latest_libcxx_tsan_bazel.sh b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
index c9ea22d..8634e8d 100755
--- a/ci/linux_clang-latest_libcxx_tsan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
@@ -87,6 +87,7 @@
--features=external_include_paths \
--keep_going \
--linkopt=\"-fsanitize=thread\" \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"TSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_clang-latest_libstdcxx_bazel.sh b/ci/linux_clang-latest_libstdcxx_bazel.sh
index a1620e0..3175e41 100755
--- a/ci/linux_clang-latest_libstdcxx_bazel.sh
+++ b/ci/linux_clang-latest_libstdcxx_bazel.sh
@@ -85,6 +85,7 @@
--features=external_include_paths \
--keep_going \
--linkopt=\"--gcc-toolchain=/usr/local\" \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_gcc-floor_libstdcxx_bazel.sh b/ci/linux_gcc-floor_libstdcxx_bazel.sh
index b683b60..0ee412d 100755
--- a/ci/linux_gcc-floor_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-floor_libstdcxx_bazel.sh
@@ -81,6 +81,7 @@
--define=\"absl=1\" \
--features=external_include_paths \
--keep_going \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/linux_gcc-latest_libstdcxx_bazel.sh b/ci/linux_gcc-latest_libstdcxx_bazel.sh
index b092c1d..6e9b5ec 100755
--- a/ci/linux_gcc-latest_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-latest_libstdcxx_bazel.sh
@@ -87,6 +87,7 @@
--enable_bzlmod=true \
--features=external_include_paths \
--keep_going \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
--test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
diff --git a/ci/macos_xcode_bazel.sh b/ci/macos_xcode_bazel.sh
index 4e73847..a09b405 100755
--- a/ci/macos_xcode_bazel.sh
+++ b/ci/macos_xcode_bazel.sh
@@ -61,6 +61,7 @@
--enable_bzlmod=true \
--features=external_include_paths \
--keep_going \
+ --per_file_copt=external/.*@-w \
--show_timestamps \
--test_env="TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo" \
--test_output=errors \
diff --git a/ci/windows_clangcl_bazel.bat b/ci/windows_clangcl_bazel.bat
index 26fd5af..94d27aa 100755
--- a/ci/windows_clangcl_bazel.bat
+++ b/ci/windows_clangcl_bazel.bat
@@ -59,6 +59,7 @@
--extra_execution_platforms=//:x64_windows-clang-cl ^
--extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl ^
--keep_going ^
+ --per_file_copt=external/.*@/w ^
--test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
--test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
--test_output=errors ^
diff --git a/ci/windows_msvc_bazel.bat b/ci/windows_msvc_bazel.bat
index bbb57b4..6318ff1 100755
--- a/ci/windows_msvc_bazel.bat
+++ b/ci/windows_msvc_bazel.bat
@@ -50,6 +50,7 @@
--define=absl=1 ^
--enable_bzlmod=true ^
--keep_going ^
+ --per_file_copt=external/.*@/w ^
--test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
--test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
--test_output=errors ^
diff --git a/patches/0005-mutex-deprecations.patch b/patches/0005-mutex-deprecations.patch
new file mode 100644
index 0000000..7811adc
--- /dev/null
+++ b/patches/0005-mutex-deprecations.patch
@@ -0,0 +1,130 @@
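+Strips the [[deprecated]] markers and ABSL_DEPRECATE_AND_INLINE() annotations
+from mutex.h, presumably so Chromium builds stay warning-clean while callers
+migrate to the reference-taking constructors.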
+diff --git a/third_party/abseil-cpp/absl/synchronization/mutex.h b/third_party/abseil-cpp/absl/synchronization/mutex.h
+index 39ea0d0f23e27..41e9010f7e76d 100644
+--- a/third_party/abseil-cpp/absl/synchronization/mutex.h
++++ b/third_party/abseil-cpp/absl/synchronization/mutex.h
+@@ -178,7 +178,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // then acquires it exclusively. (This lock is also known as a "write lock.")
+ void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
+
+- ABSL_DEPRECATE_AND_INLINE()
+ inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); }
+
+ // Mutex::unlock()
+@@ -187,7 +186,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // free state. Calling thread must hold the `Mutex` exclusively.
+ void unlock() ABSL_UNLOCK_FUNCTION();
+
+- ABSL_DEPRECATE_AND_INLINE()
+ inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
+
+ // Mutex::try_lock()
+@@ -197,7 +195,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // probability if the `Mutex` was free.
+ [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+
+- ABSL_DEPRECATE_AND_INLINE()
+ [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ return try_lock();
+ }
+@@ -252,7 +249,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // lock on the mutex.
+ void lock_shared() ABSL_SHARED_LOCK_FUNCTION();
+
+- ABSL_DEPRECATE_AND_INLINE()
+ void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); }
+
+ // Mutex::unlock_shared()
+@@ -262,7 +258,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // Note that you cannot call `unlock_shared()` on a mutex held in write mode.
+ void unlock_shared() ABSL_UNLOCK_FUNCTION();
+
+- ABSL_DEPRECATE_AND_INLINE()
+ void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); }
+
+ // Mutex::try_lock_shared()
+@@ -272,7 +267,6 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // `true` with high probability if the `Mutex` was free or shared.
+ [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+
+- ABSL_DEPRECATE_AND_INLINE()
+ [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) {
+ return try_lock_shared();
+ }
+@@ -297,13 +291,10 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
+ // These methods may be used (along with the complementary `Reader*()`
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
+ // etc.) from reader/writer lock usage.
+- ABSL_DEPRECATE_AND_INLINE()
+ void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); }
+
+- ABSL_DEPRECATE_AND_INLINE()
+ void WriterUnlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
+
+- ABSL_DEPRECATE_AND_INLINE()
+ [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ return try_lock();
+ }
+@@ -617,7 +608,6 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
+ // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is
+ // guaranteed to be locked when this object is constructed. Requires that
+ // `mu` be dereferenceable.
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : MutexLock(*mu) {}
+@@ -631,7 +621,6 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
+ this->mu_.LockWhen(cond);
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+@@ -660,7 +649,6 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
+ mu.lock_shared();
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
+ : ReaderMutexLock(*mu) {}
+@@ -671,7 +659,6 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
+ mu.ReaderLockWhen(cond);
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
+ ABSL_SHARED_LOCK_FUNCTION(mu)
+@@ -700,7 +687,6 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
+ mu.lock();
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit WriterMutexLock(Mutex* absl_nonnull mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+@@ -713,7 +699,6 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
+ mu.WriterLockWhen(cond);
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+@@ -1118,7 +1103,6 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
+ this->mu_->lock();
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+@@ -1131,7 +1115,6 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
+ this->mu_->LockWhen(cond);
+ }
+
+- [[deprecated("Use the constructor that takes a reference instead")]]
+ ABSL_REFACTOR_INLINE
+ explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
diff --git a/symbols_arm64_dbg.def b/symbols_arm64_dbg.def
index 6eab366..ff3f725 100644
--- a/symbols_arm64_dbg.def
+++ b/symbols_arm64_dbg.def
@@ -2797,6 +2797,7 @@
??_4uint128@absl@@QEAAAEAV01@V01@@Z
??_5uint128@absl@@QEAAAEAV01@V01@@Z
??_6uint128@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2936,6 +2937,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@AEBA_KXZ
?Await@Mutex@absl@@QEAAXAEBVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -3381,6 +3383,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PEAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAPEAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_arm64_dbg_cxx23.def b/symbols_arm64_dbg_cxx23.def
index 5e76007..797b8db 100644
--- a/symbols_arm64_dbg_cxx23.def
+++ b/symbols_arm64_dbg_cxx23.def
@@ -2794,6 +2794,7 @@
??_4uint128@absl@@QEAAAEAV01@V01@@Z
??_5uint128@absl@@QEAAAEAV01@V01@@Z
??_6uint128@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2933,6 +2934,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@AEBA_KXZ
?Await@Mutex@absl@@QEAAXAEBVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -3378,6 +3380,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PEAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAPEAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_arm64_rel.def b/symbols_arm64_rel.def
index 7573931..f9f9f98 100644
--- a/symbols_arm64_rel.def
+++ b/symbols_arm64_rel.def
@@ -356,6 +356,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -443,6 +444,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -727,6 +729,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_arm64_rel_cxx23.def b/symbols_arm64_rel_cxx23.def
index 9528c13..2a7a529 100644
--- a/symbols_arm64_rel_cxx23.def
+++ b/symbols_arm64_rel_cxx23.def
@@ -356,6 +356,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -443,6 +444,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -727,6 +729,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_dbg.def b/symbols_x64_dbg.def
index fcaaa27..53fde21 100644
--- a/symbols_x64_dbg.def
+++ b/symbols_x64_dbg.def
@@ -2798,6 +2798,7 @@
??_4uint128@absl@@QEAAAEAV01@V01@@Z
??_5uint128@absl@@QEAAAEAV01@V01@@Z
??_6uint128@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2937,6 +2938,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@AEBA_KXZ
?Await@Mutex@absl@@QEAAXAEBVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -3382,6 +3384,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PEAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAPEAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_dbg_cxx23.def b/symbols_x64_dbg_cxx23.def
index 43e6433..145dccd 100644
--- a/symbols_x64_dbg_cxx23.def
+++ b/symbols_x64_dbg_cxx23.def
@@ -2795,6 +2795,7 @@
??_4uint128@absl@@QEAAAEAV01@V01@@Z
??_5uint128@absl@@QEAAAEAV01@V01@@Z
??_6uint128@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2934,6 +2935,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@AEBA_KXZ
?Await@Mutex@absl@@QEAAXAEBVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -3379,6 +3381,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PEAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAPEAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QEAAAEAPEAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_rel.def b/symbols_x64_rel.def
index 46d1172..4eb85c2 100644
--- a/symbols_x64_rel.def
+++ b/symbols_x64_rel.def
@@ -355,6 +355,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -443,6 +444,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -726,6 +728,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_rel_asan.def b/symbols_x64_rel_asan.def
index 92d67f8..e9155ad 100644
--- a/symbols_x64_rel_asan.def
+++ b/symbols_x64_rel_asan.def
@@ -378,6 +378,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -467,6 +468,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -753,6 +755,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_rel_asan_cxx23.def b/symbols_x64_rel_asan_cxx23.def
index 4fc4911..76ed119 100644
--- a/symbols_x64_rel_asan_cxx23.def
+++ b/symbols_x64_rel_asan_cxx23.def
@@ -378,6 +378,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -467,6 +468,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -753,6 +755,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x64_rel_cxx23.def b/symbols_x64_rel_cxx23.def
index 53cf3d0..32e28ca 100644
--- a/symbols_x64_rel_cxx23.def
+++ b/symbols_x64_rel_cxx23.def
@@ -355,6 +355,7 @@
??_0Duration@absl@@QEAAAEAV01@N@Z
??_0Duration@absl@@QEAAAEAV01@_J@Z
??_1Duration@absl@@QEAAAEAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MEAAPEAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPEAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAVDuration@1@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -443,6 +444,7 @@
?AtLocation@LogMessage@log_internal@absl@@QEAAAEAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@AEBAPEAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AEAA_NAEBVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPEBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PEAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YA_KPEBE_KPEAD1PEBD_N@Z
@@ -726,6 +728,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQEBQEAXHP6AXPEBDPEAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPEAX_K@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YA_KPEBUCordRep@12@@Z
diff --git a/symbols_x86_dbg.def b/symbols_x86_dbg.def
index 32aaa6d..361bc09 100644
--- a/symbols_x86_dbg.def
+++ b/symbols_x86_dbg.def
@@ -2796,6 +2796,7 @@
??_4uint128@absl@@QAEAAV01@V01@@Z
??_5uint128@absl@@QAEAAV01@V01@@Z
??_6uint128@absl@@QAEAAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MAEPAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2935,6 +2936,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@ABEIXZ
?Await@Mutex@absl@@QAEXABVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AAE_NABVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YAIPBEIPADIPBD_N@Z
@@ -3380,6 +3382,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QAEAAPAPAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QAEAAPAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPAXI@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
diff --git a/symbols_x86_dbg_cxx23.def b/symbols_x86_dbg_cxx23.def
index f360871..e3f617d 100644
--- a/symbols_x86_dbg_cxx23.def
+++ b/symbols_x86_dbg_cxx23.def
@@ -2793,6 +2793,7 @@
??_4uint128@absl@@QAEAAV01@V01@@Z
??_5uint128@absl@@QAEAAV01@V01@@Z
??_6uint128@absl@@QAEAAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MAEPAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbsDuration@absl@@YA?AVDuration@1@V21@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
@@ -2932,6 +2933,7 @@
?Avail@FormatSinkImpl@str_format_internal@absl@@ABEIXZ
?Await@Mutex@absl@@QAEXABVCondition@2@@Z
?AwaitCommon@Mutex@absl@@AAE_NABVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YAIPBEIPADIPBD_N@Z
@@ -3377,6 +3379,7 @@
?GetData@?$AllocationTransaction@V?$allocator@PAVLogSink@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QAEAAPAPAVLogSink@3@XZ
?GetData@?$AllocationTransaction@V?$allocator@UPayload@status_internal@absl@@@__Cr@std@@@inlined_vector_internal@absl@@QAEAAPAUPayload@status_internal@3@XZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPAXI@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
diff --git a/symbols_x86_rel.def b/symbols_x86_rel.def
index d4ee2c6..6c7c554 100644
--- a/symbols_x86_rel.def
+++ b/symbols_x86_rel.def
@@ -355,6 +355,7 @@
??_0Duration@absl@@QAEAAV01@N@Z
??_0Duration@absl@@QAEAAV01@_J@Z
??_1Duration@absl@@QAEAAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MAEPAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAVDuration@1@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -442,6 +443,7 @@
?AtLocation@LogMessage@log_internal@absl@@QAEAAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@ABEPAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AAE_NABVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YAIPBEIPADIPBD_N@Z
@@ -726,6 +728,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPAXI@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
diff --git a/symbols_x86_rel_cxx23.def b/symbols_x86_rel_cxx23.def
index 16a4238..a2b04d9 100644
--- a/symbols_x86_rel_cxx23.def
+++ b/symbols_x86_rel_cxx23.def
@@ -355,6 +355,7 @@
??_0Duration@absl@@QAEAAV01@N@Z
??_0Duration@absl@@QAEAAV01@_J@Z
??_1Duration@absl@@QAEAAV01@V01@@Z
+ ??_ECordzHandle@cord_internal@absl@@MAEPAXI@Z
?AbortedError@absl@@YA?AVStatus@1@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslFormatFlush@detect_specialization@log_internal@absl@@YAXPAVStringifySink@123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@@Z
?AbslParseFlag@absl@@YA_NV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAVDuration@1@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
@@ -442,6 +443,7 @@
?AtLocation@LogMessage@log_internal@absl@@QAEAAV123@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@H@Z
?AtomicBufferValue@FlagImpl@flags_internal@absl@@ABEPAU?$atomic@_K@__Cr@std@@XZ
?AwaitCommon@Mutex@absl@@AAE_NABVCondition@2@VKernelTimeout@synchronization_internal@2@@Z
+ ?BadDownCastCrash@base_internal@absl@@YAXPBD0@Z
?Base64Escape@absl@@YA?AV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@__Cr@std@@V?$basic_string_view@DU?$char_traits@D@__Cr@std@@@34@@Z
?Base64Escape@absl@@YAXV?$basic_string_view@DU?$char_traits@D@__Cr@std@@@__Cr@std@@PAV?$basic_string@DU?$char_traits@D@__Cr@std@@V?$allocator@D@23@@34@@Z
?Base64EscapeInternal@strings_internal@absl@@YAIPBEIPADIPBD_N@Z
@@ -726,6 +728,7 @@
?GetCpuType@crc_internal@absl@@YA?AW4CpuType@12@XZ
?GetCurrentTimeNanos@absl@@YA_JXZ
?GetDebugStackTraceHook@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
+ ?GetDebugStackTraceHookLegacy@debugging_internal@absl@@YAP6AXQBQAXHP6AXPBDPAX@Z2@ZXZ
?GetEntropyFromRandenPool@random_internal@absl@@YAXPAXI@Z
?GetEstimatedFairShareMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z
?GetEstimatedMemoryUsage@cord_internal@absl@@YAIPBUCordRep@12@@Z