Roll abseil_revision a6c17c7e84..322ae2420d

Change Log:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+log/a6c17c7e84..322ae2420d
Full diff:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+/a6c17c7e84..322ae2420d

No changes to .def files.

Bug: None
Change-Id: I512dc2a70354d72feb8a8af825f27b4037470367
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2626438
Reviewed-by: Danil Chapovalov <danilchap@chromium.org>
Commit-Queue: Mirko Bonadei <mbonadei@chromium.org>
Cr-Commit-Position: refs/heads/master@{#843032}
GitOrigin-RevId: d15b7e813a94aa99a8c353d97d4476816681f07d
diff --git a/README.chromium b/README.chromium
index a1ac0d2..4bff005 100644
--- a/README.chromium
+++ b/README.chromium
@@ -4,7 +4,7 @@
 License: Apache 2.0
 License File: LICENSE
 Version: 0
-Revision: a6c17c7e8430f204b1c330acf1688fbcd8abebfa
+Revision: 322ae2420d27fc96d0a8ab1167d7de33671048df
 Security Critical: yes
 
 Description:
diff --git a/absl/base/config.h b/absl/base/config.h
index 3f7f32b..f6ddf0c 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -379,6 +379,15 @@
 #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
 #endif
 
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
 // ABSL_HAVE_SCHED_YIELD
 //
 // Checks whether the platform implements sched_yield(2) as defined in
@@ -490,7 +499,7 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<any>) && __cplusplus >= 201703L && \
+#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
     !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_ANY 1
 #endif
@@ -504,8 +513,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<optional>) && __cplusplus >= 201703L && \
-    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<optional>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_OPTIONAL 1
 #endif
 #endif
@@ -518,8 +527,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<variant>) && __cplusplus >= 201703L && \
-    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<variant>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_VARIANT 1
 #endif
 #endif
@@ -532,7 +541,8 @@
 #endif
 
 #ifdef __has_include
-#if __has_include(<string_view>) && __cplusplus >= 201703L
+#if __has_include(<string_view>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L
 #define ABSL_HAVE_STD_STRING_VIEW 1
 #endif
 #endif
@@ -544,8 +554,9 @@
 // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
 // version.
 // TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
-    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
+#if defined(_MSC_VER) && _MSC_VER >= 1910 &&         \
+    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
+     (defined(__cplusplus) && __cplusplus > 201402))
 // #define ABSL_HAVE_STD_ANY 1
 #define ABSL_HAVE_STD_OPTIONAL 1
 #define ABSL_HAVE_STD_VARIANT 1
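
The config.h changes do two things: each C++17 probe now tests defined(__cplusplus) before comparing its value, so the checks stay well-defined when the macro is absent (e.g. in a C compilation) and quiet under -Wundef, and a new ABSL_HAVE_SCHED_GETCPU macro advertises sched_getcpu() on Linux. A minimal consumer-side sketch, assuming a Linux/glibc toolchain; CurrentCpuOrMinusOne is an illustrative name, not part of this roll:

  #include "absl/base/config.h"
  #ifdef ABSL_HAVE_SCHED_GETCPU
  #include <sched.h>
  #endif

  // Returns the CPU the calling thread last ran on, or -1 when the
  // platform offers no way to ask.
  inline int CurrentCpuOrMinusOne() {
  #ifdef ABSL_HAVE_SCHED_GETCPU
    return sched_getcpu();
  #else
    return -1;
  #endif
  }
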
diff --git a/absl/debugging/failure_signal_handler.cc b/absl/debugging/failure_signal_handler.cc
index 5d13bdb..a9ed6ef 100644
--- a/absl/debugging/failure_signal_handler.cc
+++ b/absl/debugging/failure_signal_handler.cc
@@ -21,6 +21,7 @@
 #ifdef _WIN32
 #include <windows.h>
 #else
+#include <sched.h>
 #include <unistd.h>
 #endif
 
@@ -219,17 +220,24 @@
   absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
 }
 
-static void WriteSignalMessage(int signo, void (*writerfn)(const char*)) {
-  char buf[64];
+static void WriteSignalMessage(int signo, int cpu,
+                               void (*writerfn)(const char*)) {
+  char buf[96];
+  char on_cpu[32] = {0};
+  if (cpu != -1) {
+    snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
+  }
   const char* const signal_string =
       debugging_internal::FailureSignalToString(signo);
   if (signal_string != nullptr && signal_string[0] != '\0') {
-    snprintf(buf, sizeof(buf), "*** %s received at time=%ld ***\n",
+    snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
              signal_string,
-             static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+             static_cast<long>(time(nullptr)),   // NOLINT(runtime/int)
+             on_cpu);
   } else {
-    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld ***\n",
-             signo, static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
+             signo, static_cast<long>(time(nullptr)),  // NOLINT(runtime/int)
+             on_cpu);
   }
   writerfn(buf);
 }
@@ -269,10 +277,10 @@
 // Called by AbslFailureSignalHandler() to write the failure info. It is
 // called once with writerfn set to WriteToStderr() and then possibly
 // with writerfn set to the user provided function.
-static void WriteFailureInfo(int signo, void* ucontext,
+static void WriteFailureInfo(int signo, void* ucontext, int cpu,
                              void (*writerfn)(const char*)) {
   WriterFnStruct writerfn_struct{writerfn};
-  WriteSignalMessage(signo, writerfn);
+  WriteSignalMessage(signo, cpu, writerfn);
   WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
                   &writerfn_struct);
 }
@@ -334,6 +342,14 @@
     }
   }
 
+  // Increase the chance that the CPU we report was the same CPU on which the
+  // signal was received by doing this as early as possible, i.e. after
+  // verifying that this is not a recursive signal handler invocation.
+  int my_cpu = -1;
+#ifdef ABSL_HAVE_SCHED_GETCPU
+  my_cpu = sched_getcpu();
+#endif
+
 #ifdef ABSL_HAVE_ALARM
   // Set an alarm to abort the program in case this code hangs or deadlocks.
   if (fsh_options.alarm_on_failure_secs > 0) {
@@ -344,12 +360,12 @@
 #endif
 
   // First write to stderr.
-  WriteFailureInfo(signo, ucontext, WriteToStderr);
+  WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
 
   // Riskier code (because it is less likely to be async-signal-safe)
   // goes after this point.
   if (fsh_options.writerfn != nullptr) {
-    WriteFailureInfo(signo, ucontext, fsh_options.writerfn);
+    WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
   }
 
   if (fsh_options.call_previous_handler) {
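
The failure message now carries the CPU on which the handler ran, read via sched_getcpu() as early as possible so it is likely the CPU that actually took the signal. A minimal sketch of installing the handler (standard absl API; the sample output line is illustrative):

  #include "absl/debugging/failure_signal_handler.h"
  #include "absl/debugging/symbolize.h"

  int main(int argc, char** argv) {
    absl::InitializeSymbolizer(argv[0]);
    absl::FailureSignalHandlerOptions options;
    absl::InstallFailureSignalHandler(options);
    // On a crash, stderr now starts with something like:
    //   *** SIGSEGV received at time=1610000000 on cpu 3 ***
  }
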
diff --git a/absl/debugging/failure_signal_handler_test.cc b/absl/debugging/failure_signal_handler_test.cc
index d8283b2..6a62428 100644
--- a/absl/debugging/failure_signal_handler_test.cc
+++ b/absl/debugging/failure_signal_handler_test.cc
@@ -122,6 +122,12 @@
           "*** ", absl::debugging_internal::FailureSignalToString(signo),
           " received at ")));
 
+  // On platforms where it is possible to get the current CPU, the
+  // CPU number is also logged. Check that it is present in the output.
+#if defined(__linux__)
+  EXPECT_THAT(error_line, testing::HasSubstr(" on cpu "));
+#endif
+
   if (absl::debugging_internal::StackTraceWorksForTest()) {
     std::getline(error_output, error_line);
     EXPECT_THAT(error_line, StartsWith("PC: "));
diff --git a/absl/flags/flag.h b/absl/flags/flag.h
index a9cb2b7..f09580b 100644
--- a/absl/flags/flag.h
+++ b/absl/flags/flag.h
@@ -301,13 +301,15 @@
 #if ABSL_FLAGS_STRIP_NAMES
 #define ABSL_FLAG_IMPL_FLAGNAME(txt) ""
 #define ABSL_FLAG_IMPL_FILENAME() ""
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
-  absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag))
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag)                                      \
+  absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+                                                nullptr)
 #else
 #define ABSL_FLAG_IMPL_FLAGNAME(txt) txt
 #define ABSL_FLAG_IMPL_FILENAME() __FILE__
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
-  absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag))
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag)                                     \
+  absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+                                               __FILE__)
 #endif
 
 // ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 72507b9..6912b54 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -177,7 +177,7 @@
   EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Help(), "literal help");
   EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Filename(), "file");
 
-  flags::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(f2))
+  flags::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(f2), nullptr)
       .OnUpdate(TestCallback);
 
   EXPECT_EQ(absl::GetFlagReflectionHandle(f2).Name(), "f2");
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 8354814..e6bade0 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -721,8 +721,9 @@
 template <typename T, bool do_register>
 class FlagRegistrar {
  public:
-  explicit FlagRegistrar(Flag<T>& flag) : flag_(flag) {
-    if (do_register) flags_internal::RegisterCommandLineFlag(flag_.impl_);
+  explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
+    if (do_register)
+      flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
   }
 
   FlagRegistrar OnUpdate(FlagCallbackFunc cb) && {
diff --git a/absl/flags/internal/registry.h b/absl/flags/internal/registry.h
index a8d9eb9..4b68c85 100644
--- a/absl/flags/internal/registry.h
+++ b/absl/flags/internal/registry.h
@@ -36,7 +36,7 @@
 
 //-----------------------------------------------------------------------------
 
-bool RegisterCommandLineFlag(CommandLineFlag&);
+bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename);
 
 void FinalizeRegistry();
 
diff --git a/absl/flags/internal/sequence_lock_test.cc b/absl/flags/internal/sequence_lock_test.cc
index 9aff1ed..ff8b476 100644
--- a/absl/flags/internal/sequence_lock_test.cc
+++ b/absl/flags/internal/sequence_lock_test.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include "absl/flags/internal/sequence_lock.h"
 
+#include <algorithm>
 #include <atomic>
 #include <thread>  // NOLINT(build/c++11)
 #include <tuple>
@@ -112,13 +113,21 @@
   return result;
 }
 
-INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, ConcurrentSequenceLockTest,
-                         testing::Combine(
-                             // Buffer size (bytes).
-                             testing::Range(1, 128),
-                             // Number of reader threads.
-                             testing::ValuesIn(MultiplicativeRange(
-                                 1, absl::base_internal::NumCPUs(), 2))));
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+const int kMaxThreads = absl::base_internal::NumCPUs();
+#else
+// With TSAN, a lot of threads contending for atomic access on the sequence
+// lock make this test run too slowly.
+const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4);
+#endif
+
+INSTANTIATE_TEST_SUITE_P(
+    TestManyByteSizes, ConcurrentSequenceLockTest,
+    testing::Combine(
+        // Buffer size (bytes).
+        testing::Range(1, 128),
+        // Number of reader threads.
+        testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2))));
 
 // Simple single-threaded test, parameterized by the size of the buffer to be
 // protected.
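
MultiplicativeRange() is defined earlier in this test and is not shown in the hunk; a sketch consistent with its call site here, assuming it yields a geometric progression from low up to and including high:

  // Illustrative reconstruction, not part of this diff.
  #include <vector>

  std::vector<int> MultiplicativeRange(int low, int high, int scale) {
    std::vector<int> result;
    for (int current = low; current < high; current *= scale) {
      result.push_back(current);
    }
    result.push_back(high);
    return result;
  }
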
diff --git a/absl/flags/reflection.cc b/absl/flags/reflection.cc
index c976d46..0c76110 100644
--- a/absl/flags/reflection.cc
+++ b/absl/flags/reflection.cc
@@ -50,7 +50,7 @@
   ~FlagRegistry() = default;
 
   // Store a flag in this registry. Takes ownership of *flag.
-  void RegisterFlag(CommandLineFlag& flag);
+  void RegisterFlag(CommandLineFlag& flag, const char* filename);
 
   void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); }
   void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); }
@@ -110,7 +110,20 @@
   return it != flags_.end() ? it->second : nullptr;
 }
 
-void FlagRegistry::RegisterFlag(CommandLineFlag& flag) {
+void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) {
+  if (filename != nullptr &&
+      flag.Filename() != GetUsageConfig().normalize_filename(filename)) {
+    flags_internal::ReportUsageError(
+        absl::StrCat(
+            "Inconsistency between flag object and registration for flag '",
+            flag.Name(),
+            "', likely due to duplicate flags or an ODR violation. Relevant "
+            "files: ",
+            flag.Filename(), " and ", filename),
+        true);
+    std::exit(1);
+  }
+
   FlagRegistryLock registry_lock(*this);
 
   std::pair<FlagIterator, bool> ins =
@@ -175,8 +188,8 @@
 
 // --------------------------------------------------------------------
 
-bool RegisterCommandLineFlag(CommandLineFlag& flag) {
-  FlagRegistry::GlobalRegistry().RegisterFlag(flag);
+bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) {
+  FlagRegistry::GlobalRegistry().RegisterFlag(flag, filename);
   return true;
 }
 
@@ -266,7 +279,7 @@
   static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, "");
   auto* flag = ::new (static_cast<void*>(buf))
       flags_internal::RetiredFlagObj(name, type_id);
-  FlagRegistry::GlobalRegistry().RegisterFlag(*flag);
+  FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
 }
 
 // --------------------------------------------------------------------
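
RegisterCommandLineFlag() now receives the defining file (__FILE__ from ABSL_FLAG, nullptr for stripped-name builds and retired flags), and RegisterFlag() cross-checks it against the filename already stored in the flag object after running it through the usage config's normalize_filename. A hypothetical repro of what the check catches (file names illustrative):

  // a.cc
  ABSL_FLAG(int, port, 8080, "Serving port.");

  // b.cc -- duplicate definition; if the linker folds the two flag
  // objects into one (e.g. across shared objects), the registration
  // running in b.cc sees a flag whose recorded filename is a.cc.
  ABSL_FLAG(int, port, 8080, "Serving port.");

  // The registry then reports and calls std::exit(1):
  //   Inconsistency between flag object and registration for flag 'port',
  //   likely due to duplicate flags or an ODR violation. Relevant files:
  //   a.cc and b.cc
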
diff --git a/absl/numeric/bits_test.cc b/absl/numeric/bits_test.cc
index 8bf7bc9..7c942aa 100644
--- a/absl/numeric/bits_test.cc
+++ b/absl/numeric/bits_test.cc
@@ -560,6 +560,14 @@
   }
 }
 
+// On GCC and Clang, anticipate that implementations will be constexpr
+#if defined(__GNUC__)
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT,
+              "popcount should be constexpr");
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CLZ, "clz should be constexpr");
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CTZ, "ctz should be constexpr");
+#endif
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
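
A sketch of what constexpr bit intrinsics enable at the public API level, assuming a toolchain where the three macros above are 1:

  #include <cstdint>
  #include "absl/numeric/bits.h"

  // The C++20-style operations become usable in constant expressions.
  static_assert(absl::popcount(0xFFu) == 8, "popcount at compile time");
  static_assert(absl::countl_zero(std::uint32_t{1}) == 31, "clz at compile time");
  static_assert(absl::countr_zero(std::uint32_t{8}) == 3, "ctz at compile time");
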
diff --git a/absl/numeric/internal/bits.h b/absl/numeric/internal/bits.h
index af45700..e51941d 100644
--- a/absl/numeric/internal/bits.h
+++ b/absl/numeric/internal/bits.h
@@ -28,8 +28,15 @@
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 
-#if ABSL_HAVE_BUILTIN(__builtin_popcountl) && \
-    ABSL_HAVE_BUILTIN(__builtin_popcountll)
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
+#else
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
 #define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
 #define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
 #else
@@ -37,7 +44,8 @@
 #define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
 #endif
 
-#if ABSL_HAVE_BUILTIN(__builtin_clz) && ABSL_HAVE_BUILTIN(__builtin_clzll)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
 #define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
 #define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
 #else
@@ -45,7 +53,8 @@
 #define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
 #endif
 
-#if ABSL_HAVE_BUILTIN(__builtin_ctz) && ABSL_HAVE_BUILTIN(__builtin_ctzll)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
 #define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
 #define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
 #else
@@ -85,7 +94,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
 Popcount32(uint32_t x) noexcept {
-#if ABSL_HAVE_BUILTIN(__builtin_popcount)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
   static_assert(sizeof(unsigned int) == sizeof(x),
                 "__builtin_popcount does not take 32-bit arg");
   return __builtin_popcount(x);
@@ -98,7 +107,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
 Popcount64(uint64_t x) noexcept {
-#if ABSL_HAVE_BUILTIN(__builtin_popcountll)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
   static_assert(sizeof(unsigned long long) == sizeof(x),  // NOLINT(runtime/int)
                 "__builtin_popcount does not take 64-bit arg");
   return __builtin_popcountll(x);
@@ -122,7 +131,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
 CountLeadingZeroes32(uint32_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_clz)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
   // Use __builtin_clz, which uses the following instructions:
   //  x86: bsr, lzcnt
   //  ARM64: clz
@@ -169,7 +178,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
 CountLeadingZeroes64(uint64_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_clzll)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
   // Use __builtin_clzll, which uses the following instructions:
   //  x86: bsr, lzcnt
   //  ARM64: clz
@@ -240,7 +249,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
 CountTrailingZeroesNonzero32(uint32_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_ctz)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
   static_assert(sizeof(unsigned int) == sizeof(x),
                 "__builtin_ctz does not take 32-bit arg");
   return __builtin_ctz(x);
@@ -262,7 +271,7 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
 CountTrailingZeroesNonzero64(uint64_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_ctzll)
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
   static_assert(sizeof(unsigned long long) == sizeof(x),  // NOLINT(runtime/int)
                 "__builtin_ctzll does not take 64-bit arg");
   return __builtin_ctzll(x);
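
The wrapper exists because ABSL_HAVE_BUILTIN collapses to 0 on GCC releases before GCC 10 (which introduced __has_builtin), even though GCC has shipped these builtins for far longer; treating any GCC as having them keeps the operations constexpr there. The fallback in absl/base/config.h is roughly:

  #ifdef __has_builtin
  #define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
  #else
  #define ABSL_HAVE_BUILTIN(x) 0  // e.g. GCC < 10
  #endif
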
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 82b631a..7e66a7d 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -761,11 +761,13 @@
   synch_deadlock_detection.store(mode, std::memory_order_release);
 }
 
-// Return true iff threads x and y are waiting on the same condition for the
-// same type of lock.  Requires that x and y be waiting on the same Mutex
-// queue.
-static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
-  return x->waitp->how == y->waitp->how &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
          Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
 }
 
@@ -784,18 +786,19 @@
 //     - invalid (iff x is not in a Mutex wait queue),
 //     - null, or
 //     - a pointer to a distinct thread waiting later in the same Mutex queue
-//       such that all threads in [x, x->skip] have the same condition and
-//       lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
+//       such that all threads in [x, x->skip] have the same condition, priority
+//       and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+//       x->skip]).
 // In addition, if x->skip is  valid, (x->may_skip || x->skip == null)
 //
-// By the spec of MuSameCondition(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
 // first runnable thread y from the front a Mutex queue to adjust the skip
 // field of another thread x because if x->skip==y, x->skip must (have) become
 // invalid before y is removed.  The function TryRemove can remove a specified
 // thread from an arbitrary position in the queue whether runnable or not, so
 // it fixes up skip fields that would otherwise be left dangling.
 // The statement
-//     if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
+//     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
 // maintains the invariant provided x is not the last waiter in a Mutex queue
 // The statement
 //          if (x->skip != null) { x->skip = x->skip->skip; }
@@ -929,24 +932,17 @@
     if (s->priority > head->priority) {  // s's priority is above head's
       // try to put s in priority-fifo order, or failing that at the front.
       if (!head->maybe_unlocking) {
-        // No unlocker can be scanning the queue, so we can insert between
-        // skip-chains, and within a skip-chain if it has the same condition as
-        // s.  We insert in priority-fifo order, examining the end of every
-        // skip-chain, plus every element with the same condition as s.
+        // No unlocker can be scanning the queue, so we can insert into the
+        // middle of the queue.
+        //
+        // Within a skip chain, all waiters have the same priority, so we can
+        // skip forward through the chains until we find one with a lower
+        // priority than the waiter to be enqueued.
         PerThreadSynch *advance_to = head;    // next value of enqueue_after
-        PerThreadSynch *cur;                  // successor of enqueue_after
         do {
           enqueue_after = advance_to;
-          cur = enqueue_after->next;  // this advance ensures progress
-          advance_to = Skip(cur);   // normally, advance to end of skip chain
-                                    // (side-effect: optimizes skip chain)
-          if (advance_to != cur && s->priority > advance_to->priority &&
-              MuSameCondition(s, cur)) {
-            // but this skip chain is not a singleton, s has higher priority
-            // than its tail and has the same condition as the chain,
-            // so we can insert within the skip-chain
-            advance_to = cur;         // advance by just one
-          }
+          // (side-effect: optimizes skip chain)
+          advance_to = Skip(enqueue_after->next);
         } while (s->priority <= advance_to->priority);
               // termination guaranteed because s->priority > head->priority
               // and head is the end of a skip chain
@@ -965,21 +961,21 @@
 
       // enqueue_after can be: head, Skip(...), or cur.
       // The first two imply enqueue_after->skip == nullptr, and
-      // the last is used only if MuSameCondition(s, cur).
+      // the last is used only if MuEquivalentWaiter(s, cur).
       // We require this because clearing enqueue_after->skip
       // is impossible; enqueue_after's predecessors might also
       // incorrectly skip over s if we were to allow other
       // insertion points.
-      ABSL_RAW_CHECK(
-          enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
-          "Mutex Enqueue failure");
+      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+                         MuEquivalentWaiter(enqueue_after, s),
+                     "Mutex Enqueue failure");
 
       if (enqueue_after != head && enqueue_after->may_skip &&
-          MuSameCondition(enqueue_after, enqueue_after->next)) {
+          MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
         // enqueue_after can skip to its new successor, s
         enqueue_after->skip = enqueue_after->next;
       }
-      if (MuSameCondition(s, s->next)) {  // s->may_skip is known to be true
+      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
         s->skip = s->next;                // s may skip to its successor
       }
     } else {   // enqueue not done any other way, so
@@ -989,7 +985,7 @@
       head->next = s;
       s->readers = head->readers;  // reader count is from previous head
       s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
-      if (head->may_skip && MuSameCondition(head, s)) {
+      if (head->may_skip && MuEquivalentWaiter(head, s)) {
         // head now has successor; may skip
         head->skip = s;
       }
@@ -1009,7 +1005,7 @@
   pw->next = w->next;         // snip w out of list
   if (head == w) {            // we removed the head
     head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
-  } else if (pw != head && MuSameCondition(pw, pw->next)) {
+  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
     // pw can skip to its new successor
     if (pw->next->skip !=
         nullptr) {  // either skip to its successors skip target
@@ -1079,11 +1075,13 @@
       PerThreadSynch *w;
       if ((w = pw->next) != s) {  // search for thread,
         do {                      // processing at least one element
-          if (!MuSameCondition(s, w)) {  // seeking different condition
+          // If the current element isn't equivalent to the waiter to be
+          // removed, we can skip the entire chain.
+          if (!MuEquivalentWaiter(s, w)) {
             pw = Skip(w);                // so skip all that won't match
             // we don't have to worry about dangling skip fields
             // in the threads we skipped; none can point to s
-            // because their condition differs from s
+            // because they are in a different equivalence class.
           } else {          // seeking same condition
             FixSkip(w, s);  // fix up any skip pointer from w to s
             pw = w;
@@ -1374,7 +1372,9 @@
           len += static_cast<int>(strlen(&b->buf[len]));
         }
       }
-      ABSL_RAW_LOG(ERROR, "Acquiring %p    Mutexes held: %s",
+      ABSL_RAW_LOG(ERROR,
+                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
+                   "historical lock ordering graph has been observed",
                    static_cast<void *>(mu), b->buf);
       ABSL_RAW_LOG(ERROR, "Cycle: ");
       int path_len = deadlock_graph->FindPath(
@@ -2148,7 +2148,7 @@
           !old_h->may_skip) {                  // we used old_h as a terminator
         old_h->may_skip = true;                // allow old_h to skip once more
         ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
-        if (h != old_h && MuSameCondition(old_h, old_h->next)) {
+        if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
           old_h->skip = old_h->next;  // old_h not head & can skip to successor
         }
       }
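
Two changes land in mutex.cc: skip-chain equivalence now also requires equal thread priority (MuEquivalentWaiter), which lets the priority-FIFO insertion above jump whole chains instead of inspecting individual waiters (b/175224064), and the deadlock detector's log line spells out what it found. A minimal sketch that provokes the new message in a debug build (the detector is compiled out under NDEBUG); addresses in the output are illustrative:

  #include "absl/synchronization/mutex.h"

  absl::Mutex mu_a, mu_b;

  void Demo() {
    absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
    mu_a.Lock(); mu_b.Lock(); mu_b.Unlock(); mu_a.Unlock();  // records a -> b
    mu_b.Lock();
    mu_a.Lock();  // b -> a closes a cycle; the detector logs:
    // Acquiring absl::Mutex 0x7f... while holding 0x7f...; a cycle in the
    // historical lock ordering graph has been observed
    mu_a.Unlock(); mu_b.Unlock();
  }
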
diff --git a/absl/synchronization/mutex_benchmark.cc b/absl/synchronization/mutex_benchmark.cc
index 933ea14..e35aed8 100644
--- a/absl/synchronization/mutex_benchmark.cc
+++ b/absl/synchronization/mutex_benchmark.cc
@@ -61,8 +61,124 @@
   std::mutex* mu_;
 };
 
+// RAII object to change the Mutex priority of the running thread.
+class ScopedThreadMutexPriority {
+ public:
+  explicit ScopedThreadMutexPriority(int priority) {
+    absl::base_internal::ThreadIdentity* identity =
+        absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+    identity->per_thread_synch.priority = priority;
+    // Bump next_priority_read_cycles to the infinite future so that the
+    // implementation doesn't re-read the thread's actual scheduler priority
+    // and replace our temporary scoped priority.
+    identity->per_thread_synch.next_priority_read_cycles =
+        std::numeric_limits<int64_t>::max();
+  }
+  ~ScopedThreadMutexPriority() {
+    // Reset the "next priority read time" back to the infinite past so that
+    // the next time the Mutex implementation wants to know this thread's
+    // priority, it re-reads it from the OS instead of using our overridden
+    // priority.
+    absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
+        ->per_thread_synch.next_priority_read_cycles =
+        std::numeric_limits<int64_t>::min();
+  }
+};
+
+void BM_MutexEnqueue(benchmark::State& state) {
+  // In the "multiple priorities" variant of the benchmark, one of the
+  // threads runs with Mutex priority 0 while the rest run at elevated priority.
+  // This benchmarks the performance impact of the presence of a low priority
+  // waiter when a higher priority waiter adds itself to the queue
+  // (b/175224064).
+  //
+  // NOTE: The actual scheduler priority is not modified in this benchmark:
+  // all of the threads get CPU slices with the same priority. Only the
+  // Mutex queueing behavior is modified.
+  const bool multiple_priorities = state.range(0);
+  ScopedThreadMutexPriority priority_setter(
+      (multiple_priorities && state.thread_index != 0) ? 1 : 0);
+
+  struct Shared {
+    absl::Mutex mu;
+    std::atomic<int> looping_threads{0};
+    std::atomic<int> blocked_threads{0};
+    std::atomic<bool> thread_has_mutex{false};
+  };
+  static Shared* shared = new Shared;
+
+  // Set up 'blocked_threads' to count how many threads are currently blocked
+  // in Abseil synchronization code.
+  //
+  // NOTE: Blocking done within the Google Benchmark library itself (e.g.
+  // the barrier which synchronizes threads entering and exiting the benchmark
+  // loop) does _not_ get registered in this counter. This is because Google
+  // Benchmark uses its own synchronization primitives based on std::mutex, not
+  // Abseil synchronization primitives. If at some point the benchmark library
+  // merges into Abseil, this code may break.
+  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+      &shared->blocked_threads);
+
+  // The benchmark framework may run several iterations in the same process,
+  // reusing the same static-initialized 'shared' object. Given the semantics
+  // of the members here, we expect everything to be reset to zero by the
+  // end of any iteration. Assert that's the case, just to be sure.
+  ABSL_RAW_CHECK(
+      shared->looping_threads.load(std::memory_order_relaxed) == 0 &&
+          shared->blocked_threads.load(std::memory_order_relaxed) == 0 &&
+          !shared->thread_has_mutex.load(std::memory_order_relaxed),
+      "Shared state isn't zeroed at start of benchmark iteration");
+
+  static constexpr int kBatchSize = 1000;
+  while (state.KeepRunningBatch(kBatchSize)) {
+    shared->looping_threads.fetch_add(1);
+    for (int i = 0; i < kBatchSize; i++) {
+      {
+        absl::MutexLock l(&shared->mu);
+        shared->thread_has_mutex.store(true, std::memory_order_relaxed);
+        // Spin until all other threads are either out of the benchmark loop
+        // or blocked on the mutex. This ensures that the mutex queue is kept
+        // at its maximal length to benchmark the performance of queueing on
+        // a highly contended mutex.
+        while (shared->looping_threads.load(std::memory_order_relaxed) -
+                   shared->blocked_threads.load(std::memory_order_relaxed) !=
+               1) {
+        }
+        shared->thread_has_mutex.store(false);
+      }
+      // Spin until some other thread has acquired the mutex before we block
+      // again. This ensures that we always go through the slow (queueing)
+      // acquisition path rather than reacquiring the mutex we just released.
+      while (!shared->thread_has_mutex.load(std::memory_order_relaxed) &&
+             shared->looping_threads.load(std::memory_order_relaxed) > 1) {
+      }
+    }
+    // The benchmark framework uses a barrier to ensure that all of the threads
+    // complete their benchmark loop together before any of the threads exit
+    // the loop. So, we need to remove ourselves from the "looping threads"
+    // counter here before potentially blocking on that barrier. Otherwise,
+    // another thread spinning above might wait forever for this thread to
+    // block on the mutex while we are in fact waiting to exit.
+    shared->looping_threads.fetch_add(-1);
+  }
+  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+      nullptr);
+}
+
+BENCHMARK(BM_MutexEnqueue)
+    ->Threads(4)
+    ->Threads(64)
+    ->Threads(128)
+    ->Threads(512)
+    ->ArgName("multiple_priorities")
+    ->Arg(false)
+    ->Arg(true);
+
 template <typename MutexType>
 void BM_Contended(benchmark::State& state) {
+  int priority = state.thread_index % state.range(1);
+  ScopedThreadMutexPriority priority_setter(priority);
+
   struct Shared {
     MutexType mu;
     int data = 0;
@@ -85,81 +201,51 @@
     DelayNs(state.range(0), &shared->data);
   }
 }
+void SetupBenchmarkArgs(benchmark::internal::Benchmark* bm,
+                        bool do_test_priorities) {
+  const int max_num_priorities = do_test_priorities ? 2 : 1;
+  bm->UseRealTime()
+      // ThreadPerCpu poorly handles non-power-of-two CPU counts.
+      ->Threads(1)
+      ->Threads(2)
+      ->Threads(4)
+      ->Threads(6)
+      ->Threads(8)
+      ->Threads(12)
+      ->Threads(16)
+      ->Threads(24)
+      ->Threads(32)
+      ->Threads(48)
+      ->Threads(64)
+      ->Threads(96)
+      ->Threads(128)
+      ->Threads(192)
+      ->Threads(256)
+      ->ArgNames({"cs_ns", "num_prios"});
+  // Some empirically chosen amounts of work in the critical section.
+  // 1 is low contention, 2000 is high contention, with a few values in between.
+  for (int critical_section_ns : {1, 20, 50, 200, 2000}) {
+    for (int num_priorities = 1; num_priorities <= max_num_priorities;
+         num_priorities++) {
+      bm->ArgPair(critical_section_ns, num_priorities);
+    }
+  }
+}
 
 BENCHMARK_TEMPLATE(BM_Contended, absl::Mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/true);
+    });
 
 BENCHMARK_TEMPLATE(BM_Contended, absl::base_internal::SpinLock)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+    });
 
 BENCHMARK_TEMPLATE(BM_Contended, std::mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+    });
 
 // Measure the overhead of conditions on mutex release (when they must be
 // evaluated).  Mutex has (some) support for equivalence classes allowing