Merge "Update plugin docs for plugin tweaks in aosp/2717496" into main
diff --git a/Android.bp b/Android.bp
index 5af8aec..83823dc 100644
--- a/Android.bp
+++ b/Android.bp
@@ -4590,6 +4590,7 @@
         "protos/perfetto/metrics/android/android_frame_timeline_metric.proto",
         "protos/perfetto/metrics/android/android_sysui_notifications_blocking_calls_metric.proto",
         "protos/perfetto/metrics/android/android_trusty_workqueues.proto",
+        "protos/perfetto/metrics/android/anr_metric.proto",
         "protos/perfetto/metrics/android/batt_metric.proto",
         "protos/perfetto/metrics/android/binder_metric.proto",
         "protos/perfetto/metrics/android/camera_metric.proto",
@@ -4669,6 +4670,7 @@
         "protos/perfetto/metrics/android/android_frame_timeline_metric.proto",
         "protos/perfetto/metrics/android/android_sysui_notifications_blocking_calls_metric.proto",
         "protos/perfetto/metrics/android/android_trusty_workqueues.proto",
+        "protos/perfetto/metrics/android/anr_metric.proto",
         "protos/perfetto/metrics/android/batt_metric.proto",
         "protos/perfetto/metrics/android/binder_metric.proto",
         "protos/perfetto/metrics/android/camera_metric.proto",
@@ -4730,6 +4732,7 @@
         "protos/perfetto/metrics/android/android_frame_timeline_metric.proto",
         "protos/perfetto/metrics/android/android_sysui_notifications_blocking_calls_metric.proto",
         "protos/perfetto/metrics/android/android_trusty_workqueues.proto",
+        "protos/perfetto/metrics/android/anr_metric.proto",
         "protos/perfetto/metrics/android/batt_metric.proto",
         "protos/perfetto/metrics/android/binder_metric.proto",
         "protos/perfetto/metrics/android/camera_metric.proto",
@@ -10400,6 +10403,7 @@
 genrule {
     name: "perfetto_src_trace_processor_metrics_sql_gen_amalgamated_sql_metrics",
     srcs: [
+        "src/trace_processor/metrics/sql/android/android_anr.sql",
         "src/trace_processor/metrics/sql/android/android_batt.sql",
         "src/trace_processor/metrics/sql/android/android_binder.sql",
         "src/trace_processor/metrics/sql/android/android_blocking_calls_cuj_metric.sql",
@@ -10441,6 +10445,7 @@
         "src/trace_processor/metrics/sql/android/android_task_names.sql",
         "src/trace_processor/metrics/sql/android/android_trace_quality.sql",
         "src/trace_processor/metrics/sql/android/android_trusty_workqueues.sql",
+        "src/trace_processor/metrics/sql/android/anr/anrs.sql",
         "src/trace_processor/metrics/sql/android/codec_metrics.sql",
         "src/trace_processor/metrics/sql/android/composer_execution.sql",
         "src/trace_processor/metrics/sql/android/composition_layers.sql",
diff --git a/BUILD b/BUILD
index 3ae4c0d..8262e01 100644
--- a/BUILD
+++ b/BUILD
@@ -849,6 +849,7 @@
         "include/perfetto/tracing/core/chrome_config.h",
         "include/perfetto/tracing/core/data_source_config.h",
         "include/perfetto/tracing/core/data_source_descriptor.h",
+        "include/perfetto/tracing/core/flush_flags.h",
         "include/perfetto/tracing/core/trace_config.h",
         "include/perfetto/tracing/core/tracing_service_capabilities.h",
         "include/perfetto/tracing/core/tracing_service_state.h",
@@ -1833,6 +1834,7 @@
 perfetto_filegroup(
     name = "src_trace_processor_metrics_sql_android_android",
     srcs = [
+        "src/trace_processor/metrics/sql/android/android_anr.sql",
         "src/trace_processor/metrics/sql/android/android_batt.sql",
         "src/trace_processor/metrics/sql/android/android_binder.sql",
         "src/trace_processor/metrics/sql/android/android_blocking_calls_cuj_metric.sql",
@@ -1874,6 +1876,7 @@
         "src/trace_processor/metrics/sql/android/android_task_names.sql",
         "src/trace_processor/metrics/sql/android/android_trace_quality.sql",
         "src/trace_processor/metrics/sql/android/android_trusty_workqueues.sql",
+        "src/trace_processor/metrics/sql/android/anr/anrs.sql",
         "src/trace_processor/metrics/sql/android/codec_metrics.sql",
         "src/trace_processor/metrics/sql/android/composer_execution.sql",
         "src/trace_processor/metrics/sql/android/composition_layers.sql",
@@ -4068,6 +4071,7 @@
         "protos/perfetto/metrics/android/android_frame_timeline_metric.proto",
         "protos/perfetto/metrics/android/android_sysui_notifications_blocking_calls_metric.proto",
         "protos/perfetto/metrics/android/android_trusty_workqueues.proto",
+        "protos/perfetto/metrics/android/anr_metric.proto",
         "protos/perfetto/metrics/android/batt_metric.proto",
         "protos/perfetto/metrics/android/binder_metric.proto",
         "protos/perfetto/metrics/android/camera_metric.proto",
diff --git a/include/perfetto/ext/tracing/core/producer.h b/include/perfetto/ext/tracing/core/producer.h
index d1ca85d..234c406 100644
--- a/include/perfetto/ext/tracing/core/producer.h
+++ b/include/perfetto/ext/tracing/core/producer.h
@@ -19,7 +19,9 @@
 
 #include "perfetto/base/export.h"
 #include "perfetto/ext/tracing/core/basic_types.h"
+#include "perfetto/tracing/core/flush_flags.h"
 #include "perfetto/tracing/core/forward_decls.h"
+
 namespace perfetto {
 
 class SharedMemory;
@@ -106,7 +108,8 @@
   // flushes < N have also been committed.
   virtual void Flush(FlushRequestID,
                      const DataSourceInstanceID* data_source_ids,
-                     size_t num_data_sources) = 0;
+                     size_t num_data_sources,
+                     FlushFlags) = 0;
 
   // Called by the service to instruct the given data sources to stop referring
   // to any trace contents emitted so far. The intent is that after processing
diff --git a/include/perfetto/ext/tracing/core/tracing_service.h b/include/perfetto/ext/tracing/core/tracing_service.h
index b82a96a..690795f 100644
--- a/include/perfetto/ext/tracing/core/tracing_service.h
+++ b/include/perfetto/ext/tracing/core/tracing_service.h
@@ -30,6 +30,7 @@
 #include "perfetto/ext/tracing/core/shared_memory.h"
 #include "perfetto/ext/tracing/core/trace_packet.h"
 #include "perfetto/tracing/buffer_exhausted_policy.h"
+#include "perfetto/tracing/core/flush_flags.h"
 #include "perfetto/tracing/core/forward_decls.h"
 
 namespace perfetto {
@@ -203,7 +204,15 @@
   // if that one is not set (or is set to 0), kDefaultFlushTimeoutMs (5s) is
   // used.
   using FlushCallback = std::function<void(bool /*success*/)>;
-  virtual void Flush(uint32_t timeout_ms, FlushCallback) = 0;
+  virtual void Flush(uint32_t timeout_ms,
+                     FlushCallback callback,
+                     FlushFlags) = 0;
+
+  // The only caller of this method is arctraceservice's PerfettoClient.
+  // Everything else in the codebase uses the 3-arg Flush() above.
+  // TODO(primiano): remove the overload without FlushFlags once
+  // arctraceservice moves away from this interface. arctraceservice lives in
+  // the internal repo and changes to this interface require multi-side patches.
+  // Internally this calls Flush(timeout, callback, FlushFlags(0)).
+  virtual void Flush(uint32_t timeout_ms, FlushCallback callback);
 
   // Tracing data will be delivered invoking Consumer::OnTraceData().
   virtual void ReadBuffers() = 0;
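As a reference for how the two overloads can coexist, below is a minimal sketch of the forwarding pattern the TODO above describes. ExampleConsumerEndpoint is a hypothetical class used purely for illustration; only the "forwards with FlushFlags(0)" behaviour is taken from the comment above, everything else is an assumption.

#include <cstdint>
#include <functional>
#include <utility>

#include "perfetto/tracing/core/flush_flags.h"

// Hypothetical endpoint, for illustration only; the real consumer endpoint
// implementations live in the tracing service and IPC layers.
class ExampleConsumerEndpoint {
 public:
  using FlushCallback = std::function<void(bool /*success*/)>;

  // New-style flush: the caller states who is flushing and why.
  void Flush(uint32_t timeout_ms,
             FlushCallback callback,
             perfetto::FlushFlags flush_flags) {
    // A real implementation would propagate |flush_flags| to producers.
    (void)timeout_ms;
    (void)flush_flags;
    callback(true);
  }

  // Legacy 2-arg overload kept for arctraceservice: forwards with
  // FlushFlags(0), i.e. kUnknown initiator and reason.
  void Flush(uint32_t timeout_ms, FlushCallback callback) {
    Flush(timeout_ms, std::move(callback), perfetto::FlushFlags(0));
  }
};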
diff --git a/include/perfetto/tracing/core/BUILD.gn b/include/perfetto/tracing/core/BUILD.gn
index c961c2d..9ea0e2c 100644
--- a/include/perfetto/tracing/core/BUILD.gn
+++ b/include/perfetto/tracing/core/BUILD.gn
@@ -22,6 +22,7 @@
     "chrome_config.h",
     "data_source_config.h",
     "data_source_descriptor.h",
+    "flush_flags.h",
     "trace_config.h",
     "tracing_service_capabilities.h",
     "tracing_service_state.h",
diff --git a/include/perfetto/tracing/core/flush_flags.h b/include/perfetto/tracing/core/flush_flags.h
new file mode 100644
index 0000000..12445b5
--- /dev/null
+++ b/include/perfetto/tracing/core/flush_flags.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_FLUSH_FLAGS_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_FLUSH_FLAGS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace perfetto {
+
+// This class is a wrapper around the uint64_t flags that are sent across the
+// tracing protocol whenever a flush occurs. It helps determine the reason
+// and initiator of the flush.
+// NOTE: the values here are part of the tracing protocol ABI. Do not renumber.
+class FlushFlags {
+ public:
+  enum class Initiator : uint64_t {
+    // DO NOT RENUMBER, ABI.
+    kUnknown = 0,
+    kTraced = 1,
+    kPerfettoCmd = 2,
+    kConsumerSdk = 3,
+    kMax,
+  };
+
+  enum class Reason : uint64_t {
+    // DO NOT RENUMBER, ABI.
+    kUnknown = 0,
+    kPeriodic = 1,
+    kTraceStop = 2,
+    kTraceClone = 3,
+    kExplicit = 4,
+    kMax,
+  };
+
+  enum class CloneTarget : uint64_t {
+    // DO NOT RENUMBER, ABI.
+    kUnknown = 0,
+    kBugreport = 1,
+    kMax,
+  };
+
+  explicit FlushFlags(uint64_t flags = 0) : flags_(flags) {}
+  FlushFlags(Initiator i, Reason r, CloneTarget c = CloneTarget::kUnknown)
+      : flags_((static_cast<uint64_t>(i) << kInitiatorShift) |
+               (static_cast<uint64_t>(r) << kReasonShift) |
+               (static_cast<uint64_t>(c) << kCloneTargetShift)) {}
+
+  bool operator==(const FlushFlags& o) const { return flags_ == o.flags_; }
+  bool operator!=(const FlushFlags& o) const { return !(*this == o); }
+
+  Initiator initiator() const {
+    // Due to a version mismatch we might see a value from the future that
+    // we don't know about yet. If that happens, short-circuit to kUnknown.
+    static_assert(
+        uint64_t(Initiator::kMax) - 1 <= (kInitiatorMask >> kInitiatorShift),
+        "enum out of range");
+    const uint64_t value = (flags_ & kInitiatorMask) >> kInitiatorShift;
+    return value < uint64_t(Initiator::kMax) ? Initiator(value)
+                                             : Initiator::kUnknown;
+  }
+
+  Reason reason() const {
+    static_assert(uint64_t(Reason::kMax) - 1 <= (kReasonMask >> kReasonShift),
+                  "enum out of range");
+    const uint64_t value = (flags_ & kReasonMask) >> kReasonShift;
+    return value < uint64_t(Reason::kMax) ? Reason(value) : Reason::kUnknown;
+  }
+
+  CloneTarget clone_target() const {
+    static_assert(uint64_t(CloneTarget::kMax) - 1 <=
+                      (kCloneTargetMask >> kCloneTargetShift),
+                  "enum out of range");
+    const uint64_t value = (flags_ & kCloneTargetMask) >> kCloneTargetShift;
+    return value < uint64_t(CloneTarget::kMax) ? CloneTarget(value)
+                                               : CloneTarget::kUnknown;
+  }
+
+  uint64_t flags() const { return flags_; }
+
+ private:
+  // DO NOT CHANGE, ABI.
+  static constexpr uint64_t kReasonMask = 0xF;
+  static constexpr uint64_t kReasonShift = 0;
+  static constexpr uint64_t kInitiatorMask = 0xF0;
+  static constexpr uint64_t kInitiatorShift = 4;
+  static constexpr uint64_t kCloneTargetMask = 0xF00;
+  static constexpr uint64_t kCloneTargetShift = 8;
+
+  uint64_t flags_ = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_FLUSH_FLAGS_H_
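To make the ABI layout above concrete, here is a minimal sketch of encoding and decoding a FlushFlags value (Reason lives in bits 0-3, Initiator in bits 4-7, CloneTarget in bits 8-11). It assumes only the header introduced above.

#include <cassert>
#include <cstdint>

#include "perfetto/tracing/core/flush_flags.h"

int main() {
  using perfetto::FlushFlags;

  // Encode: kTraceStop (2) in bits 0-3, kPerfettoCmd (2) in bits 4-7.
  FlushFlags flags(FlushFlags::Initiator::kPerfettoCmd,
                   FlushFlags::Reason::kTraceStop);
  assert(flags.flags() == ((2ull << 4) | 2ull));  // 0x22

  // Decode: the raw uint64 received over the wire round-trips to the enums.
  FlushFlags decoded(flags.flags());
  assert(decoded.initiator() == FlushFlags::Initiator::kPerfettoCmd);
  assert(decoded.reason() == FlushFlags::Reason::kTraceStop);
  assert(decoded.clone_target() == FlushFlags::CloneTarget::kUnknown);

  // Values sent by a newer version that this binary does not know about
  // degrade gracefully to kUnknown.
  assert(FlushFlags(0xF).reason() == FlushFlags::Reason::kUnknown);
  return 0;
}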
diff --git a/include/perfetto/tracing/data_source.h b/include/perfetto/tracing/data_source.h
index 925e4dd..4bcd04e 100644
--- a/include/perfetto/tracing/data_source.h
+++ b/include/perfetto/tracing/data_source.h
@@ -33,6 +33,7 @@
 
 #include "perfetto/protozero/message_handle.h"
 #include "perfetto/tracing/buffer_exhausted_policy.h"
+#include "perfetto/tracing/core/flush_flags.h"
 #include "perfetto/tracing/core/forward_decls.h"
 #include "perfetto/tracing/internal/basic_types.h"
 #include "perfetto/tracing/internal/data_source_internal.h"
@@ -160,6 +161,9 @@
 
     // The index of this data source instance (0..kMaxDataSourceInstances - 1).
     uint32_t internal_instance_index = 0;
+
+    // The reason and initiator of the flush. See flush_flags.h.
+    FlushFlags flush_flags;
   };
   // Called when the tracing service requests a Flush. Users can override this
   // to tell other threads to flush their TraceContext for this data source
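A hedged sketch of how a tracing SDK data source might consume the new field: the class name and the branch taken are illustrative assumptions, only FlushArgs::flush_flags comes from this change, and the usual data-source registration boilerplate is omitted.

#include "perfetto/tracing/core/flush_flags.h"
#include "perfetto/tracing/data_source.h"

// Illustrative data source (assumption); registration macros omitted.
class ExampleDataSource : public perfetto::DataSource<ExampleDataSource> {
 public:
  void OnFlush(const FlushArgs& args) override {
    using perfetto::FlushFlags;
    if (args.flush_flags.reason() == FlushFlags::Reason::kTraceClone &&
        args.flush_flags.clone_target() ==
            FlushFlags::CloneTarget::kBugreport) {
      // E.g. emit a trimmed snapshot for bugreport clones instead of a full
      // dump.
    }
  }
};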
diff --git a/protos/perfetto/config/perfetto_config.proto b/protos/perfetto/config/perfetto_config.proto
index f457976..a259573 100644
--- a/protos/perfetto/config/perfetto_config.proto
+++ b/protos/perfetto/config/perfetto_config.proto
@@ -2873,6 +2873,21 @@
       DISCARD = 2;
     }
     optional FillPolicy fill_policy = 4;
+
+    // When true the buffer is moved (rather than copied) onto the cloned
+    // session, and an empty buffer of the same size is allocated in the source
+    // tracing session. This feature will likely get deprecated in the future.
+    // It has been introduced mainly to support the surfaceflinger snapshot dump
+    // for bugreports, where SF can dump O(400MB) into the bugreport trace. In
+    // that case we don't want to retain another in-memory copy of the buffer.
+    optional bool transfer_on_clone = 5;
+
+    // Used in conjunction with transfer_on_clone. When true the buffer is
+    // cleared before issuing the Flush(reason=kTraceClone). This is to ensure
+    // that if the data source took too long to write the data in a previous
+    // clone-related flush, we don't end up with a mixture of leftovers from
+    // the previous write and new data.
+    optional bool clear_before_clone = 6;
   }
   repeated BufferConfig buffers = 1;
 
diff --git a/protos/perfetto/config/trace_config.proto b/protos/perfetto/config/trace_config.proto
index d62b0c7..d19e73a 100644
--- a/protos/perfetto/config/trace_config.proto
+++ b/protos/perfetto/config/trace_config.proto
@@ -52,6 +52,21 @@
       DISCARD = 2;
     }
     optional FillPolicy fill_policy = 4;
+
+    // When true the buffer is moved (rather than copied) onto the cloned
+    // session, and an empty buffer of the same size is allocated in the source
+    // tracing session. This feature will likely get deprecated in the future.
+    // It has been introduced mainly to support the surfaceflinger snapshot dump
+    // for bugreports, where SF can dump O(400MB) into the bugreport trace. In
+    // that case we don't want to retain another in-memory copy of the buffer.
+    optional bool transfer_on_clone = 5;
+
+    // Used in conjunction with transfer_on_clone. When true the buffer is
+    // cleared before issuing the Flush(reason=kTraceClone). This is to ensure
+    // that if the data source took too long to write the data in a previous
+    // clone-related flush, we don't end up with a mixture of leftovers from
+    // the previous write and new data.
+    optional bool clear_before_clone = 6;
   }
   repeated BufferConfig buffers = 1;
 
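A hedged sketch of how a consumer could opt a buffer into these semantics through the C++ TraceConfig wrapper; the two setters are generated from the fields added above, while the buffer size and function name are purely illustrative.

#include "perfetto/tracing/core/trace_config.h"

// Illustrative helper (assumption), showing the two new BufferConfig fields.
perfetto::TraceConfig MakeCloneFriendlyConfig() {
  perfetto::TraceConfig cfg;
  auto* buf = cfg.add_buffers();
  buf->set_size_kb(64 * 1024);
  // Move (rather than copy) this buffer into the cloned session.
  buf->set_transfer_on_clone(true);
  // Clear any stale leftovers before the clone-triggered flush.
  buf->set_clear_before_clone(true);
  return cfg;
}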
diff --git a/protos/perfetto/ipc/consumer_port.proto b/protos/perfetto/ipc/consumer_port.proto
index 2fdc919..b587846 100644
--- a/protos/perfetto/ipc/consumer_port.proto
+++ b/protos/perfetto/ipc/consumer_port.proto
@@ -205,6 +205,10 @@
 // Arguments for rpc Flush().
 message FlushRequest {
   optional uint32 timeout_ms = 1;
+
+  // More details such as flush reason and initiator. Introduced in v38 / V.
+  // See FlushFlags in include/perfetto/tracing/core/flush_flags.h.
+  optional uint64 flags = 2;
 }
 
 message FlushResponse {}
diff --git a/protos/perfetto/ipc/producer_port.proto b/protos/perfetto/ipc/producer_port.proto
index 5c204cb..9e3d5d1 100644
--- a/protos/perfetto/ipc/producer_port.proto
+++ b/protos/perfetto/ipc/producer_port.proto
@@ -318,6 +318,10 @@
     // expected to copy this value back into the CommitDataRequest, so the
     // service can tell when the data for this flush has been committed.
     optional uint64 request_id = 2;
+
+    // More details such as flush reason and initiator. Introduced in v38 / V.
+    // See FlushFlags in include/perfetto/tracing/core/flush_flags.h.
+    optional uint64 flags = 3;
   }
 
   // Instructs the given data sources to stop referring to any trace contents
diff --git a/protos/perfetto/metrics/android/BUILD.gn b/protos/perfetto/metrics/android/BUILD.gn
index 3c18498..bd639d8 100644
--- a/protos/perfetto/metrics/android/BUILD.gn
+++ b/protos/perfetto/metrics/android/BUILD.gn
@@ -25,6 +25,7 @@
     "android_frame_timeline_metric.proto",
     "android_sysui_notifications_blocking_calls_metric.proto",
     "android_trusty_workqueues.proto",
+    "anr_metric.proto",
     "batt_metric.proto",
     "binder_metric.proto",
     "camera_metric.proto",
diff --git a/protos/perfetto/metrics/android/anr_metric.proto b/protos/perfetto/metrics/android/anr_metric.proto
new file mode 100644
index 0000000..30d3fc2
--- /dev/null
+++ b/protos/perfetto/metrics/android/anr_metric.proto
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ syntax = "proto2";
+
+ package perfetto.protos;
+
+ message AndroidAnrMetric {
+  repeated Anr anr = 1;
+
+  // Next id: 12
+  message Anr {
+    // UUID that identifies the ANR.
+    optional string error_id = 1;
+
+    // Name of the process that ANRed.
+    optional string process_name = 2;
+
+    // PID of the ANRing process.
+    optional int32 pid = 3;
+
+    // ANR subject line.
+    optional string subject = 4;
+
+    // Timestamp of the ANR in the trace.
+    optional int64 ts = 5;
+  }
+}
\ No newline at end of file
diff --git a/protos/perfetto/metrics/metrics.proto b/protos/perfetto/metrics/metrics.proto
index dfad850..feb2d72 100644
--- a/protos/perfetto/metrics/metrics.proto
+++ b/protos/perfetto/metrics/metrics.proto
@@ -19,6 +19,7 @@
 package perfetto.protos;
 
 import "protos/perfetto/metrics/android/android_frame_timeline_metric.proto";
+import "protos/perfetto/metrics/android/anr_metric.proto";
 import "protos/perfetto/metrics/android/batt_metric.proto";
 import "protos/perfetto/metrics/android/android_sysui_notifications_blocking_calls_metric.proto";
 import "protos/perfetto/metrics/android/android_blocking_calls_cuj_metric.proto";
@@ -106,7 +107,7 @@
 
 // Root message for all Perfetto-based metrics.
 //
-// Next id: 55
+// Next id: 56
 message TraceMetrics {
   reserved 4, 10, 13, 14, 16, 19;
 
@@ -256,6 +257,9 @@
   // is clear that this data is necessary.
   optional AndroidIoUnaggregated android_io_unagg = 54;
 
+  // Metrics for App Not Responding (ANR) errors.
+  optional AndroidAnrMetric android_anr = 55;
+
   // Demo extensions.
   extensions 450 to 499;
 
diff --git a/protos/perfetto/metrics/perfetto_merged_metrics.proto b/protos/perfetto/metrics/perfetto_merged_metrics.proto
index 7106f0e..588cd05 100644
--- a/protos/perfetto/metrics/perfetto_merged_metrics.proto
+++ b/protos/perfetto/metrics/perfetto_merged_metrics.proto
@@ -191,6 +191,31 @@
 
 // End of protos/perfetto/metrics/android/android_trusty_workqueues.proto
 
+// Begin of protos/perfetto/metrics/android/anr_metric.proto
+
+message AndroidAnrMetric {
+  repeated Anr anr = 1;
+
+  // Next id: 12
+  message Anr {
+    // UUID that identifies the ANR.
+    optional string error_id = 1;
+
+    // Name of the process that ANRed.
+    optional string process_name = 2;
+
+    // PID of the ANRing process.
+    optional int32 pid = 3;
+
+    // ANR subject line.
+    optional string subject = 4;
+
+    // Timestamp of the ANR in the trace.
+    optional int64 ts = 5;
+  }
+}
+// End of protos/perfetto/metrics/android/anr_metric.proto
+
 // Begin of protos/perfetto/metrics/android/batt_metric.proto
 
 message AndroidBatteryMetric {
@@ -2219,7 +2244,7 @@
 
 // Root message for all Perfetto-based metrics.
 //
-// Next id: 55
+// Next id: 56
 message TraceMetrics {
   reserved 4, 10, 13, 14, 16, 19;
 
@@ -2369,6 +2394,9 @@
   // is clear that this data is necessary.
   optional AndroidIoUnaggregated android_io_unagg = 54;
 
+  // Metrics for App Not Responding (ANR) errors.
+  optional AndroidAnrMetric android_anr = 55;
+
   // Demo extensions.
   extensions 450 to 499;
 
diff --git a/protos/perfetto/trace/perfetto_trace.proto b/protos/perfetto/trace/perfetto_trace.proto
index 20cbdd3..3e12a3a 100644
--- a/protos/perfetto/trace/perfetto_trace.proto
+++ b/protos/perfetto/trace/perfetto_trace.proto
@@ -2873,6 +2873,21 @@
       DISCARD = 2;
     }
     optional FillPolicy fill_policy = 4;
+
+    // When true the buffer is moved (rather than copied) onto the cloned
+    // session, and an empty buffer of the same size is allocated in the source
+    // tracing session. This feature will likely get deprecated in the future.
+    // It has been introduced mainly to support the surfaceflinger snapshot dump
+    // for bugreports, where SF can dump O(400MB) into the bugreport trace. In
+    // that case we don't want to retain another in-memory copy of the buffer.
+    optional bool transfer_on_clone = 5;
+
+    // Used in conjunction with transfer_on_clone. When true the buffer is
+    // cleared before issuing the Flush(reason=kTraceClone). This is to ensure
+    // that if the data source took too long to write the data in a previous
+    // clone-related flush, we don't end up with a mixture of leftovers from
+    // the previous write and new data.
+    optional bool clear_before_clone = 6;
   }
   repeated BufferConfig buffers = 1;
 
@@ -10884,8 +10899,11 @@
   // Contact perfetto-dev@googlegroups.com if you are interested in a subrange
   // for your project.
 
+  // Extension range reserved for chromium:
+  // https://source.chromium.org/chromium/chromium/src/+/main:base/tracing/protos/chrome_track_event.proto
+  extensions 1000 to 1999;
   // Extension range for future use.
-  extensions 1000 to 9899;
+  extensions 2000 to 9899;
   // Reserved for Perfetto unit and integration tests.
   extensions 9900 to 10000;
 
diff --git a/protos/perfetto/trace/track_event/track_event.proto b/protos/perfetto/trace/track_event/track_event.proto
index e57e014..9991979 100644
--- a/protos/perfetto/trace/track_event/track_event.proto
+++ b/protos/perfetto/trace/track_event/track_event.proto
@@ -280,8 +280,11 @@
   // Contact perfetto-dev@googlegroups.com if you are interested in a subrange
   // for your project.
 
+  // Extension range reserved for chromium:
+  // https://source.chromium.org/chromium/chromium/src/+/main:base/tracing/protos/chrome_track_event.proto
+  extensions 1000 to 1999;
   // Extension range for future use.
-  extensions 1000 to 9899;
+  extensions 2000 to 9899;
   // Reserved for Perfetto unit and integration tests.
   extensions 9900 to 10000;
 
diff --git a/python/perfetto/trace_processor/metrics.descriptor b/python/perfetto/trace_processor/metrics.descriptor
index 820f47e..8f80340 100644
--- a/python/perfetto/trace_processor/metrics.descriptor
+++ b/python/perfetto/trace_processor/metrics.descriptor
Binary files differ
diff --git a/src/perfetto_cmd/perfetto_cmd.cc b/src/perfetto_cmd/perfetto_cmd.cc
index 3052341..2ddbf04 100644
--- a/src/perfetto_cmd/perfetto_cmd.cc
+++ b/src/perfetto_cmd/perfetto_cmd.cc
@@ -1264,13 +1264,17 @@
       return;
     PERFETTO_LOG("SIGINT/SIGTERM received: disabling tracing.");
     weak_this->ctrl_c_evt_.Clear();
-    weak_this->consumer_endpoint_->Flush(0, [weak_this](bool flush_success) {
-      if (!weak_this)
-        return;
-      if (!flush_success)
-        PERFETTO_ELOG("Final flush unsuccessful.");
-      weak_this->consumer_endpoint_->DisableTracing();
-    });
+    weak_this->consumer_endpoint_->Flush(
+        0,
+        [weak_this](bool flush_success) {
+          if (!weak_this)
+            return;
+          if (!flush_success)
+            PERFETTO_ELOG("Final flush unsuccessful.");
+          weak_this->consumer_endpoint_->DisableTracing();
+        },
+        FlushFlags(FlushFlags::Initiator::kPerfettoCmd,
+                   FlushFlags::Reason::kTraceStop));
   });
 }
 
@@ -1305,13 +1309,17 @@
 
   if (stop_trace_once_attached_) {
     auto weak_this = weak_factory_.GetWeakPtr();
-    consumer_endpoint_->Flush(0, [weak_this](bool flush_success) {
-      if (!weak_this)
-        return;
-      if (!flush_success)
-        PERFETTO_ELOG("Final flush unsuccessful.");
-      weak_this->consumer_endpoint_->DisableTracing();
-    });
+    consumer_endpoint_->Flush(
+        0,
+        [weak_this](bool flush_success) {
+          if (!weak_this)
+            return;
+          if (!flush_success)
+            PERFETTO_ELOG("Final flush unsuccessful.");
+          weak_this->consumer_endpoint_->DisableTracing();
+        },
+        FlushFlags(FlushFlags::Initiator::kPerfettoCmd,
+                   FlushFlags::Reason::kTraceStop));
   }
 }
 
diff --git a/src/perfetto_cmd/trigger_producer.cc b/src/perfetto_cmd/trigger_producer.cc
index 7aefabd..a3abf5a 100644
--- a/src/perfetto_cmd/trigger_producer.cc
+++ b/src/perfetto_cmd/trigger_producer.cc
@@ -81,7 +81,8 @@
 }
 void TriggerProducer::Flush(FlushRequestID,
                             const DataSourceInstanceID*,
-                            size_t) {
+                            size_t,
+                            FlushFlags) {
   PERFETTO_DFATAL("Attempted to Flush() on commandline producer");
 }
 
diff --git a/src/perfetto_cmd/trigger_producer.h b/src/perfetto_cmd/trigger_producer.h
index 0a31bd3..90cd489 100644
--- a/src/perfetto_cmd/trigger_producer.h
+++ b/src/perfetto_cmd/trigger_producer.h
@@ -47,7 +47,10 @@
   void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&) override;
   void StartDataSource(DataSourceInstanceID, const DataSourceConfig&) override;
   void StopDataSource(DataSourceInstanceID) override;
-  void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
+  void Flush(FlushRequestID,
+             const DataSourceInstanceID*,
+             size_t,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
                              size_t num_data_sources) override;
 
diff --git a/src/profiling/memory/heapprofd_producer.cc b/src/profiling/memory/heapprofd_producer.cc
index d4b8ccc..7490115 100644
--- a/src/profiling/memory/heapprofd_producer.cc
+++ b/src/profiling/memory/heapprofd_producer.cc
@@ -765,7 +765,8 @@
 
 void HeapprofdProducer::Flush(FlushRequestID flush_id,
                               const DataSourceInstanceID* ids,
-                              size_t num_ids) {
+                              size_t num_ids,
+                              FlushFlags) {
   size_t& flush_in_progress = flushes_in_progress_[flush_id];
   PERFETTO_DCHECK(flush_in_progress == 0);
   flush_in_progress = num_ids;
diff --git a/src/profiling/memory/heapprofd_producer.h b/src/profiling/memory/heapprofd_producer.h
index fa1f058..f1473c8 100644
--- a/src/profiling/memory/heapprofd_producer.h
+++ b/src/profiling/memory/heapprofd_producer.h
@@ -119,7 +119,8 @@
   void OnTracingSetup() override;
   void Flush(FlushRequestID,
              const DataSourceInstanceID* data_source_ids,
-             size_t num_data_sources) override;
+             size_t num_data_sources,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* /*data_source_ids*/,
                              size_t /*num_data_sources*/) override {}
 
diff --git a/src/profiling/memory/java_hprof_producer.cc b/src/profiling/memory/java_hprof_producer.cc
index 01bd01c..9049bc3 100644
--- a/src/profiling/memory/java_hprof_producer.cc
+++ b/src/profiling/memory/java_hprof_producer.cc
@@ -163,7 +163,8 @@
 
 void JavaHprofProducer::Flush(FlushRequestID flush_id,
                               const DataSourceInstanceID*,
-                              size_t) {
+                              size_t,
+                              FlushFlags) {
   endpoint_->NotifyFlushComplete(flush_id);
 }
 
diff --git a/src/profiling/memory/java_hprof_producer.h b/src/profiling/memory/java_hprof_producer.h
index 4355d96..8591718 100644
--- a/src/profiling/memory/java_hprof_producer.h
+++ b/src/profiling/memory/java_hprof_producer.h
@@ -52,7 +52,8 @@
   void OnTracingSetup() override {}
   void Flush(FlushRequestID,
              const DataSourceInstanceID* data_source_ids,
-             size_t num_data_sources) override;
+             size_t num_data_sources,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* /*data_source_ids*/,
                              size_t /*num_data_sources*/) override {}
   // TODO(fmayer): Refactor once/if we have generic reconnect logic.
diff --git a/src/profiling/perf/perf_producer.cc b/src/profiling/perf/perf_producer.cc
index fec5b6d..109b580 100644
--- a/src/profiling/perf/perf_producer.cc
+++ b/src/profiling/perf/perf_producer.cc
@@ -561,7 +561,8 @@
 // the SMB.
 void PerfProducer::Flush(FlushRequestID flush_id,
                          const DataSourceInstanceID* data_source_ids,
-                         size_t num_data_sources) {
+                         size_t num_data_sources,
+                         FlushFlags) {
   // Flush metatracing if requested.
   for (size_t i = 0; i < num_data_sources; i++) {
     auto ds_id = data_source_ids[i];
diff --git a/src/profiling/perf/perf_producer.h b/src/profiling/perf/perf_producer.h
index 83174d3..8cd6eb2 100644
--- a/src/profiling/perf/perf_producer.h
+++ b/src/profiling/perf/perf_producer.h
@@ -78,7 +78,8 @@
   void StopDataSource(DataSourceInstanceID instance_id) override;
   void Flush(FlushRequestID flush_id,
              const DataSourceInstanceID* data_source_ids,
-             size_t num_data_sources) override;
+             size_t num_data_sources,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
                              size_t num_data_sources) override;
 
diff --git a/src/trace_processor/importers/json/json_utils.cc b/src/trace_processor/importers/json/json_utils.cc
index be9492b..d2e1c18 100644
--- a/src/trace_processor/importers/json/json_utils.cc
+++ b/src/trace_processor/importers/json/json_utils.cc
@@ -62,17 +62,37 @@
   PERFETTO_DCHECK(IsJsonSupported());
 
 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
-  size_t lhs_end = std::min<size_t>(s.find('.'), s.size());
-  size_t rhs_start = std::min<size_t>(lhs_end + 1, s.size());
-  std::optional<int64_t> lhs = base::StringToInt64(s.substr(0, lhs_end));
-  std::optional<double> rhs =
-      base::StringToDouble("0." + s.substr(rhs_start, std::string::npos));
-  if ((!lhs.has_value() && lhs_end > 0) ||
-      (!rhs.has_value() && rhs_start < s.size())) {
-    return std::nullopt;
+  // 's' is formatted as a JSON Number, in microseconds.
+  // Goal: reformat 's' as an integer number of nanoseconds.
+  std::string s_as_ns = s;
+
+  // detect and remove scientific notation's exponents
+  int32_t exp_shift = 0;
+  if (size_t exp_start = s.find_first_of("eE");
+      exp_start != std::string::npos) {
+    const std::string exp_s = s.substr(exp_start + 1, s.size());
+    const std::optional<int32_t> exp = base::StringToInt32(exp_s);
+    if (!exp.has_value()) {
+      return std::nullopt;
+    }
+    s_as_ns.erase(exp_start);
+    exp_shift = *exp;
   }
-  return lhs.value_or(0) * 1000 +
-         static_cast<int64_t>(rhs.value_or(0) * 1000.0);
+
+  // detect and remove decimal separator
+  size_t int_size = s_as_ns.size();
+  if (size_t frac_start = s.find('.'); frac_start != std::string::npos) {
+    s_as_ns.erase(frac_start, 1);
+    int_size = frac_start;
+  }
+
+  // expand or shrink to the new size
+  constexpr int us_to_ns_shift = 3;
+  const size_t s_as_ns_size = size_t(
+      std::max<ptrdiff_t>(1, ptrdiff_t(int_size) + exp_shift + us_to_ns_shift));
+  s_as_ns.resize(s_as_ns_size, '0');  // pads or truncates
+
+  return base::StringToInt64(s_as_ns);
 #else
   perfetto::base::ignore_result(s);
   return std::nullopt;
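To make the new conversion easier to follow, here is a self-contained sketch of the same digit shuffling on a plain std::string, with a worked example in the comments. It is a simplified reimplementation for illustration only: it uses atoi/strtoll instead of base::StringToInt32/StringToInt64 and therefore skips some of their validation.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <string>

// Input: a JSON Number as a string, in microseconds. Output: nanoseconds.
// Worked example: "1692108548132154.500"
//   no exponent          -> exp_shift = 0
//   drop '.'             -> "1692108548132154500", int_size = 16
//   resize to 16 + 0 + 3 -> 19 chars (already 19, unchanged)
//   parse                -> 1692108548132154500 ns
std::optional<int64_t> UsStringToNs(const std::string& s) {
  std::string s_as_ns = s;

  // Strip a scientific-notation exponent, remembering the shift it implies.
  int32_t exp_shift = 0;
  if (size_t exp_start = s.find_first_of("eE");
      exp_start != std::string::npos) {
    exp_shift = std::atoi(s.c_str() + exp_start + 1);  // no validation here
    s_as_ns.erase(exp_start);
  }

  // Drop the decimal separator; the digits before it are the integer part.
  size_t int_size = s_as_ns.size();
  if (size_t frac_start = s.find('.'); frac_start != std::string::npos) {
    s_as_ns.erase(frac_start, 1);
    int_size = frac_start;
  }

  // Pad with zeros (or truncate) so the digit count matches ns precision.
  constexpr int kUsToNsShift = 3;  // 1 us == 10^3 ns
  const size_t new_size = size_t(std::max<ptrdiff_t>(
      1, ptrdiff_t(int_size) + exp_shift + kUsToNsShift));
  s_as_ns.resize(new_size, '0');

  // Reject inputs with trailing garbage (e.g. "1234!").
  char* end = nullptr;
  const int64_t value = std::strtoll(s_as_ns.c_str(), &end, 10);
  if (end != s_as_ns.c_str() + s_as_ns.size())
    return std::nullopt;
  return value;
}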
diff --git a/src/trace_processor/importers/json/json_utils_unittest.cc b/src/trace_processor/importers/json/json_utils_unittest.cc
index 7f01ad6..5b17cc3 100644
--- a/src/trace_processor/importers/json/json_utils_unittest.cc
+++ b/src/trace_processor/importers/json/json_utils_unittest.cc
@@ -52,8 +52,29 @@
   ASSERT_EQ(CoerceToTs(Json::Value("42.0")).value_or(-1), 42000);
   ASSERT_EQ(CoerceToTs(Json::Value("0.2")).value_or(-1), 200);
   ASSERT_EQ(CoerceToTs(Json::Value("0.2e-1")).value_or(-1), 20);
+  ASSERT_EQ(CoerceToTs(Json::Value("0.2e-2")).value_or(-1), 2);
+  ASSERT_EQ(CoerceToTs(Json::Value("0.2e-3")).value_or(-1), 0);
+  ASSERT_EQ(CoerceToTs(Json::Value("1.692108548132154500e+15")).value_or(-1),
+            1'692'108'548'132'154'500);
+  ASSERT_EQ(CoerceToTs(Json::Value("1692108548132154.500")).value_or(-1),
+            1'692'108'548'132'154'500);
+  ASSERT_EQ(CoerceToTs(Json::Value("1.692108548132154501e+15")).value_or(-1),
+            1'692'108'548'132'154'501);
+  ASSERT_EQ(CoerceToTs(Json::Value("1692108548132154.501")).value_or(-1),
+            1'692'108'548'132'154'501);
+  ASSERT_EQ(CoerceToTs(Json::Value("-1.692108548132154500E+15")).value_or(-1),
+            -1'692'108'548'132'154'500);
+  ASSERT_EQ(CoerceToTs(Json::Value("-1692108548132154.500")).value_or(-1),
+            -1'692'108'548'132'154'500);
+  ASSERT_EQ(CoerceToTs(Json::Value("-1.692108548132154501E+15")).value_or(-1),
+            -1'692'108'548'132'154'501);
+  ASSERT_EQ(CoerceToTs(Json::Value("-1692108548132154.501")).value_or(-1),
+            -1'692'108'548'132'154'501);
+  ASSERT_EQ(CoerceToTs(Json::Value("-0")).value_or(-1), 0);
+  ASSERT_EQ(CoerceToTs(Json::Value("0")).value_or(-1), 0);
   ASSERT_EQ(CoerceToTs(Json::Value(".")).value_or(-1), 0);
   ASSERT_FALSE(CoerceToTs(Json::Value("1234!")).has_value());
+  ASSERT_FALSE(CoerceToTs(Json::Value("123e4!")).has_value());
 }
 
 }  // namespace
diff --git a/src/trace_processor/importers/proto/statsd_module.cc b/src/trace_processor/importers/proto/statsd_module.cc
index a3cce7a..06beb62 100644
--- a/src/trace_processor/importers/proto/statsd_module.cc
+++ b/src/trace_processor/importers/proto/statsd_module.cc
@@ -247,8 +247,12 @@
   SliceId slice = opt_slice.value();
   auto inserter = context_->args_tracker->AddArgsTo(slice);
   InserterDelegate delgate(inserter, *context_->storage.get());
-  args_parser_.ParseMessage(nested_bytes, kAtomProtoName,
-                            nullptr /* parse all fields */, delgate);
+  base::Status result = args_parser_.ParseMessage(
+      nested_bytes, kAtomProtoName, nullptr /* parse all fields */, delgate);
+  if (!result.ok()) {
+    PERFETTO_ELOG("%s", result.c_message());
+    context_->storage->IncrementStats(stats::atom_unknown);
+  }
 }
 
 StringId StatsdModule::GetAtomName(uint32_t atom_field_id) {
diff --git a/src/trace_processor/metrics/sql/android/BUILD.gn b/src/trace_processor/metrics/sql/android/BUILD.gn
index 28eb9d0..92da898 100644
--- a/src/trace_processor/metrics/sql/android/BUILD.gn
+++ b/src/trace_processor/metrics/sql/android/BUILD.gn
@@ -19,6 +19,7 @@
 
 perfetto_sql_source_set("android") {
   sources = [
+    "android_anr.sql",
     "android_batt.sql",
     "android_binder.sql",
     "android_blocking_calls_cuj_metric.sql",
@@ -60,6 +61,7 @@
     "android_task_names.sql",
     "android_trace_quality.sql",
     "android_trusty_workqueues.sql",
+    "anr/anrs.sql",
     "codec_metrics.sql",
     "composer_execution.sql",
     "composition_layers.sql",
diff --git a/src/trace_processor/metrics/sql/android/android_anr.sql b/src/trace_processor/metrics/sql/android/android_anr.sql
new file mode 100644
index 0000000..17d6f95
--- /dev/null
+++ b/src/trace_processor/metrics/sql/android/android_anr.sql
@@ -0,0 +1,34 @@
+--
+-- Copyright 2022 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+-- Create the base table (`android_anr_anrs`) containing all ANRs found
+-- in the trace.
+SELECT RUN_METRIC('android/anr/anrs.sql');
+
+DROP VIEW IF EXISTS android_anr_output;
+CREATE VIEW android_anr_output AS
+SELECT
+  AndroidAnrMetric(
+    'anr', (
+      SELECT RepeatedField(
+        AndroidAnrMetric_Anr(
+          'process_name', process_name,
+          'pid', pid,
+          'subject', subject,
+          'error_id', error_id,
+          'ts', ts))
+      FROM android_anr_anrs
+    )
+  );
\ No newline at end of file
diff --git a/src/trace_processor/metrics/sql/android/anr/anrs.sql b/src/trace_processor/metrics/sql/android/anr/anrs.sql
new file mode 100644
index 0000000..769fe14
--- /dev/null
+++ b/src/trace_processor/metrics/sql/android/anr/anrs.sql
@@ -0,0 +1,50 @@
+--
+-- Copyright 2022 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+-- One row per ANR that occurred in the trace.
+DROP TABLE IF EXISTS android_anr_anrs;
+CREATE TABLE android_anr_anrs AS
+-- Process and PID that ANRed.
+WITH anr_process AS (
+  SELECT
+    -- Counter formats:
+    -- v1: "ErrorId:<process_name>#<UUID>"
+    -- v2: "ErrorId:<process_name> <pid>#<UUID>"
+    STR_SPLIT(SUBSTR(STR_SPLIT(process_counter_track.name, '#', 0), 9), ' ', 0) AS process_name,
+    CAST(STR_SPLIT(SUBSTR(STR_SPLIT(process_counter_track.name, '#', 0), 9), ' ', 1) AS INT32) AS pid,
+    STR_SPLIT(process_counter_track.name, '#', 1) AS error_id,
+    counter.ts
+  FROM process_counter_track
+  JOIN process USING (upid)
+  JOIN counter ON (counter.track_id = process_counter_track.id)
+  WHERE process_counter_track.name GLOB 'ErrorId:*'
+    AND process.name = 'system_server'
+),
+-- ANR subject line.
+anr_subject AS (
+  -- Counter format:
+  -- "Subject(for ErrorId <UUID>):<subject>"
+  SELECT
+    SUBSTR(STR_SPLIT(process_counter_track.name, ')', 0), 21) AS error_id,
+    SUBSTR(process_counter_track.name, length(STR_SPLIT(process_counter_track.name, ')', 0)) + 3) AS subject
+  FROM process_counter_track
+  JOIN process
+  USING (upid)
+  WHERE process_counter_track.name GLOB 'Subject(for ErrorId *'
+  AND process.name = 'system_server'
+)
+SELECT *
+FROM anr_process
+LEFT JOIN anr_subject USING (error_id);
\ No newline at end of file
diff --git a/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql b/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
index 4632457..8a1a337 100644
--- a/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
@@ -13,6 +13,7 @@
 -- See the License for the specific language governing permissions and
 -- limitations under the License.
 --
+SELECT IMPORT('common.slices');
 
 -- A 'thread_executing_span' is thread_state span starting with a runnable slice
 -- until the next runnable slice that's woken up by a process (as opposed
@@ -645,3 +646,161 @@
     leaf_blocked_function
   FROM experimental_thread_executing_span_ancestors($thread_executing_span_id, $leaf_utid),
     trace_bounds;
+
+-- Critical path of thread_executing_spans 'span joined' with their thread states.
+-- See |experimental_thread_executing_span_critical_path|.
+--
+-- @arg leaf_utid INT                       Thread utid to filter critical paths for.
+--
+-- @column id                               Id of the first (runnable) thread state in thread_executing_span.
+-- @column thread_state_id                  Id of thread_state in the critical path.
+-- @column ts                               Timestamp of thread_state in the critical path.
+-- @column dur                              Duration of thread_state in the critical path.
+-- @column tid                              Tid of thread with thread_state.
+-- @column pid                              Pid of process with thread_state.
+-- @column utid                             Utid of thread with thread_state.
+-- @column upid                             Upid of process with thread_state.
+-- @column thread_name                      Name of thread with thread_state.
+-- @column process_name                     Name of process with thread_state.
+-- @column state                            Thread state of thread in the critical path.
+-- @column blocked_function                 Blocked function of thread in the critical path.
+-- @column height                           Tree height of thread_executing_span thread_state belongs to.
+-- @column leaf_utid                        Thread Utid the critical path was filtered to.
+CREATE PERFETTO FUNCTION experimental_thread_executing_span_critical_path_thread_states(leaf_utid INT)
+RETURNS TABLE(
+  id INT,
+  thread_state_id INT,
+  ts LONG,
+  dur LONG,
+  tid INT,
+  pid INT,
+  utid INT,
+  upid INT,
+  thread_name STRING,
+  process_name STRING,
+  state STRING,
+  blocked_function STRING,
+  height INT,
+  leaf_utid INT
+) AS
+WITH
+  span_starts AS (
+    SELECT
+      span.id,
+      thread_state.id AS thread_state_id,
+      MAX(thread_state.ts, span.ts) AS ts,
+      span.ts + span.dur AS span_end_ts,
+      thread_state.ts + thread_state.dur AS thread_state_end_ts,
+      span.tid,
+      span.pid,
+      span.utid,
+      span.upid,
+      span.thread_name,
+      span.process_name,
+      thread_state.state,
+      thread_state.blocked_function,
+      span.height,
+      span.leaf_utid
+    FROM experimental_thread_executing_span_critical_path(NULL, $leaf_utid) span
+    JOIN thread_state
+      ON
+        thread_state.utid = span.utid
+        AND ((thread_state.ts BETWEEN span.ts AND span.ts + span.dur)
+             OR (span.ts BETWEEN thread_state.ts AND thread_state.ts + thread_state.dur))
+  )
+SELECT
+  id,
+  thread_state_id,
+  ts,
+  MIN(span_end_ts, thread_state_end_ts) - ts AS dur,
+  tid,
+  pid,
+  utid,
+  upid,
+  thread_name,
+  process_name,
+  state,
+  blocked_function,
+  height,
+  leaf_utid
+FROM span_starts
+WHERE MIN(span_end_ts, thread_state_end_ts) - ts > 0;
+
+-- Critical path of thread_executing_spans 'span joined' with their slices.
+-- See |experimental_thread_executing_span_critical_path|.
+--
+-- @arg leaf_utid INT                       Thread utid to filter critical paths for.
+--
+-- @column id                               Id of the first (runnable) thread state in thread_executing_span.
+-- @column slice_id                         Id of slice in the critical path.
+-- @column ts                               Timestamp of slice in the critical path.
+-- @column dur                              Duration of slice in the critical path.
+-- @column tid                              Tid of thread that emitted the slice.
+-- @column pid                              Pid of process that emitted the slice.
+-- @column utid                             Utid of thread that emitted the slice.
+-- @column upid                             Upid of process that emitted the slice.
+-- @column thread_name                      Name of thread that emitted the slice.
+-- @column process_name                     Name of process that emitted the slice.
+-- @column slice_name                       Name of slice in the critical path.
+-- @column slice_depth                      Depth of slice in its slice stack in the critical path.
+-- @column height                           Tree height of thread_executing_span the slice belongs to.
+-- @column leaf_utid                        Thread Utid the critical path was filtered to.
+CREATE PERFETTO FUNCTION experimental_thread_executing_span_critical_path_slices(leaf_utid INT)
+RETURNS TABLE(
+  id INT,
+  slice_id INT,
+  ts LONG,
+  dur LONG,
+  tid INT,
+  pid INT,
+  utid INT,
+  upid INT,
+  thread_name STRING,
+  process_name STRING,
+  slice_name STRING,
+  slice_depth INT,
+  height INT,
+  leaf_utid INT
+) AS
+WITH
+  span_start AS (
+    SELECT
+      span.id,
+      slice.id AS slice_id,
+      MAX(slice.ts, span.ts) AS ts,
+      span.ts + span.dur AS span_end_ts,
+      slice.ts + slice.dur AS slice_end_ts,
+      span.tid,
+      span.pid,
+      span.utid,
+      span.upid,
+      span.thread_name,
+      span.process_name,
+      slice.name AS slice_name,
+      slice.depth AS slice_depth,
+      span.height,
+      span.leaf_utid
+    FROM experimental_thread_executing_span_critical_path(NULL, $leaf_utid) span
+    JOIN thread_slice slice
+      ON
+        slice.utid = span.utid
+        AND ((slice.ts BETWEEN span.ts AND span.ts + span.dur)
+             OR (span.ts BETWEEN slice.ts AND slice.ts + slice.dur))
+  )
+SELECT
+  id,
+  slice_id,
+  ts,
+  MIN(span_end_ts, slice_end_ts) - ts AS dur,
+  tid,
+  pid,
+  utid,
+  upid,
+  thread_name,
+  process_name,
+  slice_name,
+  slice_depth,
+  height,
+  leaf_utid
+FROM span_start
+WHERE MIN(span_end_ts, slice_end_ts) - ts > 0;
diff --git a/src/trace_processor/util/proto_to_args_parser.cc b/src/trace_processor/util/proto_to_args_parser.cc
index 8b387a6..633eb59 100644
--- a/src/trace_processor/util/proto_to_args_parser.cc
+++ b/src/trace_processor/util/proto_to_args_parser.cc
@@ -329,6 +329,9 @@
     case FieldDescriptorProto::TYPE_FLOAT:
       delegate.AddDouble(key_prefix_, static_cast<double>(field.as_float()));
       return base::OkStatus();
+    case FieldDescriptorProto::TYPE_BYTES:
+      delegate.AddBytes(key_prefix_, field.as_bytes());
+      return base::OkStatus();
     case FieldDescriptorProto::TYPE_STRING:
       delegate.AddString(key_prefix_, field.as_string());
       return base::OkStatus();
diff --git a/src/trace_processor/util/proto_to_args_parser.h b/src/trace_processor/util/proto_to_args_parser.h
index 9dd3e87..e797efd 100644
--- a/src/trace_processor/util/proto_to_args_parser.h
+++ b/src/trace_processor/util/proto_to_args_parser.h
@@ -87,6 +87,12 @@
     virtual void AddDouble(const Key& key, double value) = 0;
     virtual void AddPointer(const Key& key, const void* value) = 0;
     virtual void AddBoolean(const Key& key, bool value) = 0;
+    virtual void AddBytes(const Key& key, const protozero::ConstBytes& value) {
+      // In the absence of a better implementation, default to showing the
+      // bytes as a string containing only their size:
+      std::string msg = "<bytes size=" + std::to_string(value.size) + ">";
+      AddString(key, msg);
+    }
     // Returns whether an entry was added or not.
     virtual bool AddJson(const Key& key,
                          const protozero::ConstChars& value) = 0;
@@ -137,10 +143,6 @@
   // |type| must be the fully qualified name, but with a '.' added to the
   // beginning. I.E. ".perfetto.protos.TrackEvent". And must match one of the
   // descriptors already added through |AddProtoFileDescriptor|.
-  //
-  // IMPORTANT: currently bytes fields are not supported.
-  //
-  // TODO(b/145578432): Add support for byte fields.
   base::Status ParseMessage(const protozero::ConstBytes& cb,
                             const std::string& type,
                             const std::vector<uint32_t>* allowed_fields,
diff --git a/src/trace_processor/util/proto_to_args_parser_unittest.cc b/src/trace_processor/util/proto_to_args_parser_unittest.cc
index b72fff0..5baf8f0 100644
--- a/src/trace_processor/util/proto_to_args_parser_unittest.cc
+++ b/src/trace_processor/util/proto_to_args_parser_unittest.cc
@@ -91,6 +91,13 @@
     args_.push_back(ss.str());
   }
 
+  void AddBytes(const Key& key, const protozero::ConstBytes& value) override {
+    std::stringstream ss;
+    ss << key.flat_key << " " << key.key << " <bytes size=" << value.size
+       << ">";
+    args_.push_back(ss.str());
+  }
+
   void AddDouble(const Key& key, double value) override {
     std::stringstream ss;
     ss << key.flat_key << " " << key.key << " " << value;
@@ -176,6 +183,7 @@
   msg->add_repeated_int32(-1);
   msg->add_repeated_int32(100);
   msg->add_repeated_int32(2000000);
+  msg->set_field_bytes({0, 1, 2});
 
   auto binary_proto = msg.SerializeAsArray();
 
@@ -212,7 +220,8 @@
           "repeated_int32 repeated_int32[0] 1",
           "repeated_int32 repeated_int32[1] -1",
           "repeated_int32 repeated_int32[2] 100",
-          "repeated_int32 repeated_int32[3] 2000000"));
+          "repeated_int32 repeated_int32[3] 2000000",
+          "field_bytes field_bytes <bytes size=3>"));
 }
 
 TEST_F(ProtoToArgsParserTest, NestedProto) {
diff --git a/src/traced/probes/probes_producer.cc b/src/traced/probes/probes_producer.cc
index f4528ab..0aff1db 100644
--- a/src/traced/probes/probes_producer.cc
+++ b/src/traced/probes/probes_producer.cc
@@ -502,7 +502,8 @@
 
 void ProbesProducer::Flush(FlushRequestID flush_request_id,
                            const DataSourceInstanceID* data_source_ids,
-                           size_t num_data_sources) {
+                           size_t num_data_sources,
+                           FlushFlags) {
   PERFETTO_DLOG("ProbesProducer::Flush(%" PRIu64 ") begin", flush_request_id);
   PERFETTO_DCHECK(flush_request_id);
   auto log_on_exit = base::OnScopeExit([&] {
diff --git a/src/traced/probes/probes_producer.h b/src/traced/probes/probes_producer.h
index b021ee5..e7a4203 100644
--- a/src/traced/probes/probes_producer.h
+++ b/src/traced/probes/probes_producer.h
@@ -56,7 +56,8 @@
   void OnTracingSetup() override;
   void Flush(FlushRequestID,
              const DataSourceInstanceID* data_source_ids,
-             size_t num_data_sources) override;
+             size_t num_data_sources,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
                              size_t num_data_sources) override;
 
diff --git a/src/traced/service/builtin_producer.cc b/src/traced/service/builtin_producer.cc
index 6516100..b9c5a97 100644
--- a/src/traced/service/builtin_producer.cc
+++ b/src/traced/service/builtin_producer.cc
@@ -284,7 +284,8 @@
 
 void BuiltinProducer::Flush(FlushRequestID flush_id,
                             const DataSourceInstanceID* ds_ids,
-                            size_t num_ds_ids) {
+                            size_t num_ds_ids,
+                            FlushFlags) {
   for (size_t i = 0; i < num_ds_ids; i++) {
     auto meta_it = metatrace_.writers.find(ds_ids[i]);
     if (meta_it != metatrace_.writers.end()) {
diff --git a/src/traced/service/builtin_producer.h b/src/traced/service/builtin_producer.h
index 7f0d892..7eb3f94 100644
--- a/src/traced/service/builtin_producer.h
+++ b/src/traced/service/builtin_producer.h
@@ -43,7 +43,10 @@
   void OnConnect() override;
   void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&) override;
   void StartDataSource(DataSourceInstanceID, const DataSourceConfig&) override;
-  void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
+  void Flush(FlushRequestID,
+             const DataSourceInstanceID*,
+             size_t,
+             FlushFlags) override;
   void StopDataSource(DataSourceInstanceID) override;
 
   // nops:
diff --git a/src/tracing/core/trace_buffer.cc b/src/tracing/core/trace_buffer.cc
index baa6a34..f0e9d31 100644
--- a/src/tracing/core/trace_buffer.cc
+++ b/src/tracing/core/trace_buffer.cc
@@ -110,6 +110,7 @@
     return;
   }
 
+  has_data_ = true;
 #if PERFETTO_DCHECK_IS_ON()
   changed_since_last_read_ = true;
 #endif
diff --git a/src/tracing/core/trace_buffer.h b/src/tracing/core/trace_buffer.h
index f6994ba..9dea557 100644
--- a/src/tracing/core/trace_buffer.h
+++ b/src/tracing/core/trace_buffer.h
@@ -213,6 +213,7 @@
                           bool chunk_complete,
                           const uint8_t* src,
                           size_t size);
+
   // Applies a batch of |patches| to the given chunk, if the given chunk is
   // still in the buffer. Does nothing if the given ChunkID is gone.
   // Returns true if the chunk has been found and patched, false otherwise.
@@ -281,9 +282,12 @@
   // TraceBuffer will CHECK().
   std::unique_ptr<TraceBuffer> CloneReadOnly() const;
 
+  void set_read_only() { read_only_ = true; }
   const WriterStatsMap& writer_stats() const { return writer_stats_; }
   const TraceStats::BufferStats& stats() const { return stats_; }
   size_t size() const { return size_; }
+  OverwritePolicy overwrite_policy() const { return overwrite_policy_; }
+  bool has_data() const { return has_data_; }
 
  private:
   friend class TraceBufferTest;
@@ -709,6 +713,11 @@
   // Per-{Producer, Writer} statistics.
   WriterStatsMap writer_stats_;
 
+  // Set to true upon the very first call to CopyChunkUntrusted() and never
+  // cleared. This is used to tell if the buffer has never been used since its
+  // creation (which in turn is used to optimize `clear_before_clone`).
+  bool has_data_ = false;
+
 #if PERFETTO_DCHECK_IS_ON()
   bool changed_since_last_read_ = false;
 #endif
diff --git a/src/tracing/core/tracing_service_impl.cc b/src/tracing/core/tracing_service_impl.cc
index d7f5634..9040ded 100644
--- a/src/tracing/core/tracing_service_impl.cc
+++ b/src/tracing/core/tracing_service_impl.cc
@@ -1748,7 +1748,8 @@
 
 void TracingServiceImpl::Flush(TracingSessionID tsid,
                                uint32_t timeout_ms,
-                               ConsumerEndpoint::FlushCallback callback) {
+                               ConsumerEndpoint::FlushCallback callback,
+                               FlushFlags flush_flags) {
   PERFETTO_DCHECK_THREAD(thread_checker_);
   TracingSession* tracing_session = GetTracingSession(tsid);
   if (!tracing_session) {
@@ -1794,7 +1795,7 @@
     ProducerID producer_id = kv.first;
     ProducerEndpointImpl* producer = GetProducer(producer_id);
     const std::vector<DataSourceInstanceID>& data_sources = kv.second;
-    producer->Flush(flush_request_id, data_sources);
+    producer->Flush(flush_request_id, data_sources, flush_flags);
     pending_flush.producers.insert(producer_id);
   }
 
@@ -2001,27 +2002,32 @@
   PERFETTO_DCHECK_THREAD(thread_checker_);
   PERFETTO_DLOG("Triggering final flush for %" PRIu64, tsid);
   auto weak_this = weak_ptr_factory_.GetWeakPtr();
-  Flush(tsid, 0, [weak_this, tsid](bool success) {
-    // This was a DLOG up to Jun 2021 (v16, Android S).
-    PERFETTO_LOG("FlushAndDisableTracing(%" PRIu64 ") done, success=%d", tsid,
-                 success);
-    if (!weak_this)
-      return;
-    TracingSession* session = weak_this->GetTracingSession(tsid);
-    session->final_flush_outcome = success ? TraceStats::FINAL_FLUSH_SUCCEEDED
+  Flush(
+      tsid, 0,
+      [weak_this, tsid](bool success) {
+        // This was a DLOG up to Jun 2021 (v16, Android S).
+        PERFETTO_LOG("FlushAndDisableTracing(%" PRIu64 ") done, success=%d",
+                     tsid, success);
+        if (!weak_this)
+          return;
+        TracingSession* session = weak_this->GetTracingSession(tsid);
+        session->final_flush_outcome = success
+                                           ? TraceStats::FINAL_FLUSH_SUCCEEDED
                                            : TraceStats::FINAL_FLUSH_FAILED;
-    if (session->consumer_maybe_null) {
-      // If the consumer is still attached, just disable the session but give it
-      // a chance to read the contents.
-      weak_this->DisableTracing(tsid);
-    } else {
-      // If the consumer detached, destroy the session. If the consumer did
-      // start the session in long-tracing mode, the service will have saved
-      // the contents to the passed file. If not, the contents will be
-      // destroyed.
-      weak_this->FreeBuffers(tsid);
-    }
-  });
+        if (session->consumer_maybe_null) {
+          // If the consumer is still attached, just disable the session but
+          // give it a chance to read the contents.
+          weak_this->DisableTracing(tsid);
+        } else {
+          // If the consumer detached, destroy the session. If the consumer did
+          // start the session in long-tracing mode, the service will have saved
+          // the contents to the passed file. If not, the contents will be
+          // destroyed.
+          weak_this->FreeBuffers(tsid);
+        }
+      },
+      FlushFlags(FlushFlags::Initiator::kTraced,
+                 FlushFlags::Reason::kTraceStop));
 }
 
 void TracingServiceImpl::PeriodicFlushTask(TracingSessionID tsid,
@@ -2045,10 +2051,14 @@
     return;
 
   PERFETTO_DLOG("Triggering periodic flush for trace session %" PRIu64, tsid);
-  Flush(tsid, 0, [](bool success) {
-    if (!success)
-      PERFETTO_ELOG("Periodic flush timed out");
-  });
+  Flush(
+      tsid, 0,
+      [](bool success) {
+        if (!success)
+          PERFETTO_ELOG("Periodic flush timed out");
+      },
+      FlushFlags(FlushFlags::Initiator::kTraced,
+                 FlushFlags::Reason::kPeriodic));
 }
 
 void TracingServiceImpl::PeriodicClearIncrementalStateTask(
@@ -3591,6 +3601,7 @@
 void TracingServiceImpl::FlushAndCloneSession(ConsumerEndpointImpl* consumer,
                                               TracingSessionID tsid) {
   PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto clone_target = FlushFlags::CloneTarget::kUnknown;
 
   if (tsid == kBugreportSessionId) {
     TracingSession* session = FindTracingSessionWithMaxBugreportScore();
@@ -3600,21 +3611,68 @@
       return;
     }
     tsid = session->id;
+    clone_target = FlushFlags::CloneTarget::kBugreport;
+  }
+
+  TracingSession* session = GetTracingSession(tsid);
+  if (!session) {
+    consumer->consumer_->OnSessionCloned(
+        {false, "Tracing session not found", {}});
+    return;
+  }
+
+  // If any of the buffers are marked as clear_before_clone, reset them before
+  // issuing the Flush(Reason::kTraceClone).
+  size_t buf_idx = 0;
+  for (BufferID src_buf_id : session->buffers_index) {
+    if (!session->config.buffers()[buf_idx++].clear_before_clone())
+      continue;
+    auto buf_iter = buffers_.find(src_buf_id);
+    PERFETTO_CHECK(buf_iter != buffers_.end());
+    std::unique_ptr<TraceBuffer>& buf = buf_iter->second;
+
+    // No need to reset the buffer if nothing has been written into it yet.
+    // This is the canonical case when producers behave nicely and don't time
+    // out while handling writes during the flush.
+    // This check avoids a useless re-mmap upon every Clone() if the buffer is
+    // already empty (when used in combination with `transfer_on_clone`).
+    if (!buf->has_data())
+      continue;
+
+    // Some leftover data was left in the buffer. Recreate it to empty it.
+    const auto buf_policy = buf->overwrite_policy();
+    const auto buf_size = buf->size();
+    std::unique_ptr<TraceBuffer> old_buf = std::move(buf);
+    buf = TraceBuffer::Create(buf_size, buf_policy);
+    if (!buf) {
+      // This is extremely rare but could happen on 32-bit. If the new buffer
+      // allocation failed, put the buffer back where it was and fail the
+      // clone. We cannot leave the original tracing session buffer-less as it
+      // would cause crashes when data sources commit new data.
+      buf = std::move(old_buf);
+      consumer->consumer_->OnSessionCloned(
+          {false, "Buffer allocation failed while attempting to clone", {}});
+      return;
+    }
   }
 
   auto weak_this = weak_ptr_factory_.GetWeakPtr();
   auto weak_consumer = consumer->GetWeakPtr();
-  Flush(tsid, 0, [weak_this, tsid, weak_consumer](bool final_flush_outcome) {
-    PERFETTO_LOG("FlushAndCloneSession(%" PRIu64 ") started, success=%d", tsid,
-                 final_flush_outcome);
-    if (!weak_this || !weak_consumer)
-      return;
-    base::Uuid uuid;
-    base::Status result = weak_this->DoCloneSession(&*weak_consumer, tsid,
-                                                    final_flush_outcome, &uuid);
-    weak_consumer->consumer_->OnSessionCloned(
-        {result.ok(), result.message(), uuid});
-  });
+  Flush(
+      tsid, 0,
+      [weak_this, tsid, weak_consumer](bool final_flush_outcome) {
+        PERFETTO_LOG("FlushAndCloneSession(%" PRIu64 ") started, success=%d",
+                     tsid, final_flush_outcome);
+        if (!weak_this || !weak_consumer)
+          return;
+        base::Uuid uuid;
+        base::Status result = weak_this->DoCloneSession(
+            &*weak_consumer, tsid, final_flush_outcome, &uuid);
+        weak_consumer->consumer_->OnSessionCloned(
+            {result.ok(), result.message(), uuid});
+      },
+      FlushFlags(FlushFlags::Initiator::kTraced,
+                 FlushFlags::Reason::kTraceClone, clone_target));
 }
 
 base::Status TracingServiceImpl::DoCloneSession(ConsumerEndpointImpl* consumer,
@@ -3647,13 +3705,31 @@
   // happens bail out early before creating any session.
   std::vector<std::pair<BufferID, std::unique_ptr<TraceBuffer>>> buf_snaps;
   buf_snaps.reserve(src->num_buffers());
+  PERFETTO_DCHECK(src->num_buffers() == src->config.buffers().size());
   bool buf_clone_failed = false;
+  size_t buf_idx = 0;
   for (BufferID src_buf_id : src->buffers_index) {
-    TraceBuffer* src_buf = GetBufferByID(src_buf_id);
-    std::unique_ptr<TraceBuffer> buf_snap = src_buf->CloneReadOnly();
+    auto buf_iter = buffers_.find(src_buf_id);
+    PERFETTO_CHECK(buf_iter != buffers_.end());
+    std::unique_ptr<TraceBuffer>& src_buf = buf_iter->second;
+    std::unique_ptr<TraceBuffer> new_buf;
+    if (src->config.buffers()[buf_idx].transfer_on_clone()) {
+      const auto buf_policy = src_buf->overwrite_policy();
+      const auto buf_size = src_buf->size();
+      new_buf = std::move(src_buf);
+      src_buf = TraceBuffer::Create(buf_size, buf_policy);
+      if (!src_buf) {
+        // If the allocation fails put the buffer back and let the code below
+        // handle the failure gracefully.
+        src_buf = std::move(new_buf);
+      }
+    } else {
+      new_buf = src_buf->CloneReadOnly();
+    }
     BufferID buf_global_id = buffer_ids_.Allocate();
-    buf_clone_failed |= !buf_snap.get() || !buf_global_id;
-    buf_snaps.emplace_back(buf_global_id, std::move(buf_snap));
+    buf_clone_failed |= !new_buf.get() || !buf_global_id;
+    buf_snaps.emplace_back(buf_global_id, std::move(new_buf));
+    ++buf_idx;
   }
 
   // Free up allocated IDs in case of failure. No need to free the TraceBuffers,
@@ -3687,6 +3763,11 @@
   for (auto& kv : buf_snaps) {
     BufferID buf_global_id = kv.first;
     std::unique_ptr<TraceBuffer>& buf = kv.second;
+    // This is only needed for transfer_on_clone. Other buffers are already
+    // marked as read-only by CloneReadOnly(). We cannot do this early because
+    // in case of an allocation failure we std::move() the original buffer
+    // back into its place, and in that case it should not be made read-only.
+    buf->set_read_only();
     buffers_.emplace(buf_global_id, std::move(buf));
     cloned_session->buffers_index.emplace_back(buf_global_id);
   }
@@ -3826,13 +3907,14 @@
 }
 
 void TracingServiceImpl::ConsumerEndpointImpl::Flush(uint32_t timeout_ms,
-                                                     FlushCallback callback) {
+                                                     FlushCallback callback,
+                                                     FlushFlags flush_flags) {
   PERFETTO_DCHECK_THREAD(thread_checker_);
   if (!tracing_session_id_) {
     PERFETTO_LOG("Consumer called Flush() but tracing was not active");
     return;
   }
-  service_->Flush(tracing_session_id_, timeout_ms, callback);
+  service_->Flush(tracing_session_id_, timeout_ms, callback, flush_flags);
 }
 
 void TracingServiceImpl::ConsumerEndpointImpl::Detach(const std::string& key) {
@@ -4319,15 +4401,17 @@
 
 void TracingServiceImpl::ProducerEndpointImpl::Flush(
     FlushRequestID flush_request_id,
-    const std::vector<DataSourceInstanceID>& data_sources) {
+    const std::vector<DataSourceInstanceID>& data_sources,
+    FlushFlags flush_flags) {
   PERFETTO_DCHECK_THREAD(thread_checker_);
   auto weak_this = weak_ptr_factory_.GetWeakPtr();
-  task_runner_->PostTask([weak_this, flush_request_id, data_sources] {
-    if (weak_this) {
-      weak_this->producer_->Flush(flush_request_id, data_sources.data(),
-                                  data_sources.size());
-    }
-  });
+  task_runner_->PostTask(
+      [weak_this, flush_request_id, data_sources, flush_flags] {
+        if (weak_this) {
+          weak_this->producer_->Flush(flush_request_id, data_sources.data(),
+                                      data_sources.size(), flush_flags);
+        }
+      });
 }
 
 void TracingServiceImpl::ProducerEndpointImpl::SetupDataSource(
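For reference, a minimal sketch of a TraceConfig that exercises the two buffer options the cloning path above consults (transfer_on_clone and clear_before_clone). The setters mirror the TransferOnClone/ClearBeforeClone unit tests added later in this change; the function name and include path are illustrative assumptions, not part of the CL:

    #include "perfetto/tracing/core/trace_config.h"

    // Sketch only: buffer layout matching what FlushAndCloneSession()/
    // DoCloneSession() handle above.
    perfetto::TraceConfig MakeCloneFriendlyConfig() {
      perfetto::TraceConfig cfg;
      cfg.add_buffers()->set_size_kb(1024);  // Buf 0: snapshotted via CloneReadOnly().
      auto* buf1 = cfg.add_buffers();        // Buf 1: moved into the clone; the source
      buf1->set_size_kb(1024);               //        buffer is re-created empty.
      buf1->set_transfer_on_clone(true);
      auto* buf2 = cfg.add_buffers();        // Buf 2: emptied before the clone flush,
      buf2->set_size_kb(1024);               //        but only if it already has data.
      buf2->set_clear_before_clone(true);
      return cfg;
    }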
diff --git a/src/tracing/core/tracing_service_impl.h b/src/tracing/core/tracing_service_impl.h
index 1a146c9..e9c85ee 100644
--- a/src/tracing/core/tracing_service_impl.h
+++ b/src/tracing/core/tracing_service_impl.h
@@ -136,7 +136,9 @@
     void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&);
     void StartDataSource(DataSourceInstanceID, const DataSourceConfig&);
     void StopDataSource(DataSourceInstanceID);
-    void Flush(FlushRequestID, const std::vector<DataSourceInstanceID>&);
+    void Flush(FlushRequestID,
+               const std::vector<DataSourceInstanceID>&,
+               FlushFlags);
     void OnFreeBuffers(const std::vector<BufferID>& target_buffers);
     void ClearIncrementalState(const std::vector<DataSourceInstanceID>&);
 
@@ -219,7 +221,7 @@
     void DisableTracing() override;
     void ReadBuffers() override;
     void FreeBuffers() override;
-    void Flush(uint32_t timeout_ms, FlushCallback) override;
+    void Flush(uint32_t timeout_ms, FlushCallback, FlushFlags) override;
     void Detach(const std::string& key) override;
     void Attach(const std::string& key) override;
     void GetTraceStats() override;
@@ -306,7 +308,8 @@
   void DisableTracing(TracingSessionID, bool disable_immediately = false);
   void Flush(TracingSessionID tsid,
              uint32_t timeout_ms,
-             ConsumerEndpoint::FlushCallback);
+             ConsumerEndpoint::FlushCallback,
+             FlushFlags);
   void FlushAndDisableTracing(TracingSessionID);
   void FlushAndCloneSession(ConsumerEndpointImpl*, TracingSessionID);
 
diff --git a/src/tracing/core/tracing_service_impl_unittest.cc b/src/tracing/core/tracing_service_impl_unittest.cc
index 7583d33..f5a8591 100644
--- a/src/tracing/core/tracing_service_impl_unittest.cc
+++ b/src/tracing/core/tracing_service_impl_unittest.cc
@@ -786,7 +786,8 @@
   bool flushed_writer_1 = false;
   bool flushed_writer_2 = false;
   auto flush_correct_writer = [&](FlushRequestID flush_req_id,
-                                  const DataSourceInstanceID* id, size_t) {
+                                  const DataSourceInstanceID* id, size_t,
+                                  FlushFlags) {
     if (*id == id1) {
       flushed_writer_1 = true;
       writer1->Flush();
@@ -797,7 +798,9 @@
       producer->endpoint()->NotifyFlushComplete(flush_req_id);
     }
   };
-  EXPECT_CALL(*producer, Flush(_, _, _))
+  FlushFlags flush_flags(FlushFlags::Initiator::kTraced,
+                         FlushFlags::Reason::kTraceStop);
+  EXPECT_CALL(*producer, Flush(_, _, _, flush_flags))
       .WillOnce(Invoke(flush_correct_writer))
       .WillOnce(Invoke(flush_correct_writer));
 
@@ -2111,7 +2114,9 @@
       }));
   consumer2->CloneSession(1);
   // CloneSession() will implicitly issue a flush. Linearize with that.
-  producer->ExpectFlush(std::vector<TraceWriter*>{writer.get()});
+  FlushFlags expected_flags(FlushFlags::Initiator::kTraced,
+                            FlushFlags::Reason::kTraceClone);
+  producer->ExpectFlush(writer.get(), /*reply=*/true, expected_flags);
   task_runner.RunUntilCheckpoint("clone_done");
 
   // Delete the initial tracing session.
@@ -2454,7 +2459,9 @@
   }
 
   auto flush_request = consumer->Flush();
-  producer->ExpectFlush(writer.get());
+  FlushFlags expected_flags(FlushFlags::Initiator::kConsumerSdk,
+                            FlushFlags::Reason::kExplicit);
+  producer->ExpectFlush(writer.get(), /*reply=*/true, expected_flags);
   ASSERT_TRUE(flush_request.WaitForReply());
 
   consumer->DisableTracing();
@@ -2492,7 +2499,9 @@
     tp->set_for_testing()->set_str("payload");
   }
 
-  producer->ExpectFlush(writer.get());
+  FlushFlags expected_flags(FlushFlags::Initiator::kTraced,
+                            FlushFlags::Reason::kTraceStop);
+  producer->ExpectFlush(writer.get(), /*reply=*/true, expected_flags);
 
   producer->WaitForDataSourceStop("data_source");
   consumer->WaitForTracingDisabled();
@@ -2591,10 +2600,13 @@
   const int kNumFlushes = 3;
   auto checkpoint = task_runner.CreateCheckpoint("all_flushes_done");
   int flushes_seen = 0;
-  EXPECT_CALL(*producer, Flush(_, _, _))
+  FlushFlags flush_flags(FlushFlags::Initiator::kTraced,
+                         FlushFlags::Reason::kPeriodic);
+  EXPECT_CALL(*producer, Flush(_, _, _, flush_flags))
       .WillRepeatedly(Invoke([&producer, &writer, &flushes_seen, checkpoint](
                                  FlushRequestID flush_req_id,
-                                 const DataSourceInstanceID*, size_t) {
+                                 const DataSourceInstanceID*, size_t,
+                                 FlushFlags) {
         {
           auto tp = writer->NewTracePacket();
           char payload[32];
@@ -4709,6 +4721,10 @@
   std::unique_ptr<MockConsumer> consumer = CreateMockConsumer();
   consumer->Connect(svc.get());
 
+  std::unique_ptr<MockProducer> producer = CreateMockProducer();
+  producer->Connect(svc.get(), "mock_producer");
+  producer->RegisterDataSource("ds_1");
+
   // The consumer that clones it and reads back the data.
   std::unique_ptr<MockConsumer> consumer2 = CreateMockConsumer();
   consumer2->Connect(svc.get(), 1234);
@@ -4716,9 +4732,18 @@
   TraceConfig trace_config;
   trace_config.add_buffers()->set_size_kb(32);
   trace_config.set_bugreport_score(1);
+  auto* ds_cfg = trace_config.add_data_sources()->mutable_config();
+  ds_cfg->set_name("ds_1");
 
+  EXPECT_CALL(*producer, SetupDataSource(_, _));
+  EXPECT_CALL(*producer, StartDataSource(_, _));
   consumer->EnableTracing(trace_config);
+  producer->WaitForTracingSetup();
+
   auto flush_request = consumer->Flush();
+  FlushFlags flush_flags(FlushFlags::Initiator::kConsumerSdk,
+                         FlushFlags::Reason::kExplicit);
+  producer->ExpectFlush({}, /*reply=*/true, flush_flags);
   ASSERT_TRUE(flush_request.WaitForReply());
 
   auto clone_done = task_runner.CreateCheckpoint("clone_done");
@@ -4727,10 +4752,194 @@
         clone_done();
         ASSERT_TRUE(args.success);
       }));
-  consumer2->CloneSession(1);
+
+  FlushFlags flush_flags2(FlushFlags::Initiator::kTraced,
+                          FlushFlags::Reason::kTraceClone,
+                          FlushFlags::CloneTarget::kBugreport);
+  producer->ExpectFlush({}, /*reply=*/true, flush_flags2);
+
+  consumer2->CloneSession(kBugreportSessionId);
   task_runner.RunUntilCheckpoint("clone_done");
 }
 
+TEST_F(TracingServiceImplTest, TransferOnClone) {
+  // The consumer that creates the initial tracing session.
+  std::unique_ptr<MockConsumer> consumer = CreateMockConsumer();
+  consumer->Connect(svc.get());
+
+  std::unique_ptr<MockProducer> producer = CreateMockProducer();
+  producer->Connect(svc.get(), "mock_producer");
+
+  // Create two data sources, as we'll write on two distinct buffers.
+  producer->RegisterDataSource("ds_1");
+  producer->RegisterDataSource("ds_2");
+
+  TraceConfig trace_config;
+  trace_config.add_buffers()->set_size_kb(1024);  // Buf 0.
+  auto* buf1_cfg = trace_config.add_buffers();    // Buf 1 (transfer_on_clone).
+  buf1_cfg->set_size_kb(1024);
+  buf1_cfg->set_transfer_on_clone(true);
+  auto* ds_cfg = trace_config.add_data_sources()->mutable_config();
+  ds_cfg->set_name("ds_1");
+  ds_cfg->set_target_buffer(0);
+  ds_cfg = trace_config.add_data_sources()->mutable_config();
+  ds_cfg->set_name("ds_2");
+  ds_cfg->set_target_buffer(1);
+
+  consumer->EnableTracing(trace_config);
+  producer->WaitForTracingSetup();
+
+  producer->WaitForDataSourceSetup("ds_1");
+  producer->WaitForDataSourceSetup("ds_2");
+
+  producer->WaitForDataSourceStart("ds_1");
+  producer->WaitForDataSourceStart("ds_2");
+
+  std::unique_ptr<TraceWriter> writers[] = {
+      producer->CreateTraceWriter("ds_1"),
+      producer->CreateTraceWriter("ds_2"),
+  };
+
+  // Write once in the first buffer. This is expected to persist across clones.
+  static constexpr int kNumTestPackets = 10;
+  for (int n = 0; n < kNumTestPackets; n++) {
+    auto tp = writers[0]->NewTracePacket();
+    base::StackString<64> payload("persistent_%d", n);
+    tp->set_for_testing()->set_str(payload.c_str(), payload.len());
+  }
+
+  const int kLastIteration = 3;
+  for (int iteration = 1; iteration <= kLastIteration; iteration++) {
+    // The consumer that clones the session and reads back the data.
+    std::unique_ptr<MockConsumer> clone_consumer = CreateMockConsumer();
+    clone_consumer->Connect(svc.get());
+
+    // Add some new data to the 2nd buffer, which is transferred.
+    // Omit writing in the last iteration to test we get an empty buffer.
+    for (int n = 0; n < kNumTestPackets && iteration != kLastIteration; n++) {
+      auto tp = writers[1]->NewTracePacket();
+      base::StackString<64> payload("transferred_%d_%d", iteration, n);
+      tp->set_for_testing()->set_str(payload.c_str(), payload.len());
+    }
+
+    std::string clone_checkpoint_name = "clone_" + std::to_string(iteration);
+    auto clone_done = task_runner.CreateCheckpoint(clone_checkpoint_name);
+    base::Uuid clone_uuid;
+    EXPECT_CALL(*clone_consumer, OnSessionCloned(_))
+        .WillOnce(InvokeWithoutArgs(clone_done));
+    clone_consumer->CloneSession(1);
+
+    // CloneSession() will implicitly issue a flush. Linearize with that.
+    producer->ExpectFlush({writers[0].get(), writers[1].get()});
+    task_runner.RunUntilCheckpoint(clone_checkpoint_name);
+
+    auto packets = clone_consumer->ReadBuffers();
+    std::vector<std::string> actual_payloads;
+    for (const auto& packet : packets) {
+      if (packet.has_for_testing())
+        actual_payloads.emplace_back(packet.for_testing().str());
+    }
+    std::vector<std::string> expected_payloads;
+    for (int n = 0; n < kNumTestPackets; n++) {
+      base::StackString<64> expected_payload("persistent_%d", n);
+      expected_payloads.emplace_back(expected_payload.ToStdString());
+    }
+    for (int n = 0; n < kNumTestPackets && iteration != kLastIteration; n++) {
+      base::StackString<64> expected_payload("transferred_%d_%d", iteration, n);
+      expected_payloads.emplace_back(expected_payload.ToStdString());
+    }
+    ASSERT_THAT(actual_payloads, ElementsAreArray(expected_payloads));
+  }  // for (iteration)
+
+  consumer->DisableTracing();
+  producer->WaitForDataSourceStop("ds_1");
+  producer->WaitForDataSourceStop("ds_2");
+  consumer->WaitForTracingDisabled();
+
+  // Read the data from the primary (non-cloned) tracing session. Check that
+  // it doesn't have any "transferred_xxx" payload but only the "persistent_xxx"
+  // coming from the standard non-transferred buffer.
+  auto packets = consumer->ReadBuffers();
+  EXPECT_THAT(packets,
+              Not(Contains(Property(&protos::gen::TracePacket::for_testing,
+                                    Property(&protos::gen::TestEvent::str,
+                                             HasSubstr("transferred_"))))));
+  EXPECT_THAT(packets, Contains(Property(&protos::gen::TracePacket::for_testing,
+                                         Property(&protos::gen::TestEvent::str,
+                                                  HasSubstr("persistent_")))));
+}
+
+TEST_F(TracingServiceImplTest, ClearBeforeClone) {
+  // The consumer that creates the initial tracing session.
+  std::unique_ptr<MockConsumer> consumer = CreateMockConsumer();
+  consumer->Connect(svc.get());
+
+  std::unique_ptr<MockProducer> producer = CreateMockProducer();
+  producer->Connect(svc.get(), "mock_producer");
+
+  producer->RegisterDataSource("ds_1");
+
+  TraceConfig trace_config;
+  // Unused. This buffer is created only to make the test less trivial and to
+  // cover the case where the cleared buffer is beyond the 0th entry.
+  trace_config.add_buffers()->set_size_kb(32);
+
+  auto* buf_cfg = trace_config.add_buffers();
+  buf_cfg->set_size_kb(1024);
+  buf_cfg->set_clear_before_clone(true);
+  auto* ds_cfg = trace_config.add_data_sources()->mutable_config();
+  ds_cfg->set_name("ds_1");
+  ds_cfg->set_target_buffer(1);
+
+  consumer->EnableTracing(trace_config);
+  producer->WaitForTracingSetup();
+  producer->WaitForDataSourceSetup("ds_1");
+  producer->WaitForDataSourceStart("ds_1");
+
+  std::unique_ptr<TraceWriter> writer = producer->CreateTraceWriter("ds_1");
+
+  // These packets, emitted before the clone, should be dropped.
+  for (int i = 0; i < 3; i++) {
+    writer->NewTracePacket()->set_for_testing()->set_str("before_clone");
+  }
+  auto flush_request = consumer->Flush();
+  producer->ExpectFlush(writer.get());
+  ASSERT_TRUE(flush_request.WaitForReply());
+
+  // The consumer that clones the session and reads back the data.
+  std::unique_ptr<MockConsumer> clone_consumer = CreateMockConsumer();
+  clone_consumer->Connect(svc.get());
+
+  auto clone_done = task_runner.CreateCheckpoint("clone_done");
+  EXPECT_CALL(*clone_consumer, OnSessionCloned(_))
+      .WillOnce(InvokeWithoutArgs(clone_done));
+  clone_consumer->CloneSession(1);
+
+  // CloneSession() will implicitly issue a flush. Write some other packets
+  // in that callback. Those are the only ones that should survive in the cloned
+  // session.
+  FlushFlags flush_flags(FlushFlags::Initiator::kTraced,
+                         FlushFlags::Reason::kTraceClone);
+  EXPECT_CALL(*producer, Flush(_, _, _, flush_flags))
+      .WillOnce(Invoke([&](FlushRequestID flush_req_id,
+                           const DataSourceInstanceID*, size_t, FlushFlags) {
+        writer->NewTracePacket()->set_for_testing()->set_str("after_clone");
+        writer->Flush(
+            [&] { producer->endpoint()->NotifyFlushComplete(flush_req_id); });
+      }));
+
+  task_runner.RunUntilCheckpoint("clone_done");
+
+  auto packets = clone_consumer->ReadBuffers();
+  EXPECT_THAT(packets,
+              Not(Contains(Property(&protos::gen::TracePacket::for_testing,
+                                    Property(&protos::gen::TestEvent::str,
+                                             HasSubstr("before_clone"))))));
+  EXPECT_THAT(packets, Contains(Property(&protos::gen::TracePacket::for_testing,
+                                         Property(&protos::gen::TestEvent::str,
+                                                  HasSubstr("after_clone")))));
+}
+
 TEST_F(TracingServiceImplTest, InvalidBufferSizes) {
   std::unique_ptr<MockConsumer> consumer = CreateMockConsumer();
   consumer->Connect(svc.get());
diff --git a/src/tracing/core/virtual_destructors.cc b/src/tracing/core/virtual_destructors.cc
index d38a776..6c94741 100644
--- a/src/tracing/core/virtual_destructors.cc
+++ b/src/tracing/core/virtual_destructors.cc
@@ -40,4 +40,18 @@
 void ConsumerEndpoint::CloneSession(TracingSessionID) {}
 void Consumer::OnSessionCloned(const OnSessionClonedArgs&) {}
 
+void ConsumerEndpoint::Flush(uint32_t, FlushCallback, FlushFlags) {
+  // In the perfetto codebase, this 3-arg Flush is always overridden and this
+  // FATAL is never reached. The only case where this is used is in
+  // arctraceservice's PerfettoClient_test.cpp. That test mocks the old
+  // 2-arg version of Flush but doesn't actually invoke the 3-arg version.
+  PERFETTO_FATAL("ConsumerEndpoint::Flush(3) not implemented");
+}
+
+void ConsumerEndpoint::Flush(uint32_t timeout_ms, FlushCallback callback) {
+  // This 2-arg version of Flush() is invoked by arctraceservice's
+  // PerfettoClient::Flush().
+  Flush(timeout_ms, std::move(callback), FlushFlags(0));
+}
+
 }  // namespace perfetto
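As a rough sketch of the intended FlushFlags usage: the constructors, accessors and operator== below are inferred from the call sites in this change (the service, the IPC layers and the updated mocks); the include path and helper name are assumptions for illustration only:

    #include "perfetto/tracing/core/flush_flags.h"

    // Sketch only: build flags explicitly, pack them for IPC, rebuild them on
    // the receiving side.
    void FlushFlagsRoundTripExample() {
      perfetto::FlushFlags flags(perfetto::FlushFlags::Initiator::kTraced,
                                 perfetto::FlushFlags::Reason::kTraceClone,
                                 perfetto::FlushFlags::CloneTarget::kBugreport);
      // Bit-packed value carried by FlushRequest.flags / FlushCommand.flags.
      const auto wire_bits = flags.flags();
      // Reconstructed from the raw bits, as ConsumerIPCService and
      // ProducerIPCClientImpl do below.
      perfetto::FlushFlags roundtrip(wire_bits);
      // roundtrip == flags, and roundtrip.initiator()/.reason() return the values
      // above; the gmock expectations in the tests rely on this equality.
      (void)roundtrip;
    }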
diff --git a/src/tracing/internal/tracing_backend_fake.cc b/src/tracing/internal/tracing_backend_fake.cc
index 4b0d31e..438da32 100644
--- a/src/tracing/internal/tracing_backend_fake.cc
+++ b/src/tracing/internal/tracing_backend_fake.cc
@@ -115,7 +115,9 @@
   void StartTracing() override {}
   void DisableTracing() override {}
 
-  void Flush(uint32_t /*timeout_ms*/, FlushCallback callback) override {
+  void Flush(uint32_t /*timeout_ms*/,
+             FlushCallback callback,
+             FlushFlags) override {
     callback(/*success=*/false);
   }
 
diff --git a/src/tracing/internal/tracing_muxer_impl.cc b/src/tracing/internal/tracing_muxer_impl.cc
index e953823..738cf31 100644
--- a/src/tracing/internal/tracing_muxer_impl.cc
+++ b/src/tracing/internal/tracing_muxer_impl.cc
@@ -296,14 +296,15 @@
 void TracingMuxerImpl::ProducerImpl::Flush(
     FlushRequestID flush_id,
     const DataSourceInstanceID* instances,
-    size_t instance_count) {
+    size_t instance_count,
+    FlushFlags flush_flags) {
   PERFETTO_DCHECK_THREAD(thread_checker_);
   bool all_handled = true;
   if (muxer_) {
     for (size_t i = 0; i < instance_count; i++) {
       DataSourceInstanceID ds_id = instances[i];
-      bool handled =
-          muxer_->FlushDataSource_AsyncBegin(backend_id_, ds_id, flush_id);
+      bool handled = muxer_->FlushDataSource_AsyncBegin(backend_id_, ds_id,
+                                                        flush_id, flush_flags);
       if (!handled) {
         pending_flushes_[flush_id].insert(ds_id);
         all_handled = false;
@@ -1115,34 +1116,33 @@
     InterceptorFactory factory,
     InterceptorBase::TLSFactory tls_factory,
     InterceptorBase::TracePacketCallback packet_callback) {
-  task_runner_->PostTask(
-      [this, descriptor, factory, tls_factory, packet_callback] {
-        // Ignore repeated registrations.
-        for (const auto& interceptor : interceptors_) {
-          if (interceptor.descriptor.name() == descriptor.name()) {
-            PERFETTO_DCHECK(interceptor.tls_factory == tls_factory);
-            PERFETTO_DCHECK(interceptor.packet_callback == packet_callback);
-            return;
-          }
-        }
-        // Only allow certain interceptors for now.
-        if (descriptor.name() != "test_interceptor" &&
-            descriptor.name() != "console" &&
-            descriptor.name() != "etwexport") {
-          PERFETTO_ELOG(
-              "Interceptors are experimental. If you want to use them, please "
-              "get in touch with the project maintainers "
-              "(https://perfetto.dev/docs/contributing/"
-              "getting-started#community).");
-          return;
-        }
-        interceptors_.emplace_back();
-        RegisteredInterceptor& interceptor = interceptors_.back();
-        interceptor.descriptor = descriptor;
-        interceptor.factory = factory;
-        interceptor.tls_factory = tls_factory;
-        interceptor.packet_callback = packet_callback;
-      });
+  task_runner_->PostTask([this, descriptor, factory, tls_factory,
+                          packet_callback] {
+    // Ignore repeated registrations.
+    for (const auto& interceptor : interceptors_) {
+      if (interceptor.descriptor.name() == descriptor.name()) {
+        PERFETTO_DCHECK(interceptor.tls_factory == tls_factory);
+        PERFETTO_DCHECK(interceptor.packet_callback == packet_callback);
+        return;
+      }
+    }
+    // Only allow certain interceptors for now.
+    if (descriptor.name() != "test_interceptor" &&
+        descriptor.name() != "console" && descriptor.name() != "etwexport") {
+      PERFETTO_ELOG(
+          "Interceptors are experimental. If you want to use them, please "
+          "get in touch with the project maintainers "
+          "(https://perfetto.dev/docs/contributing/"
+          "getting-started#community).");
+      return;
+    }
+    interceptors_.emplace_back();
+    RegisteredInterceptor& interceptor = interceptors_.back();
+    interceptor.descriptor = descriptor;
+    interceptor.factory = factory;
+    interceptor.tls_factory = tls_factory;
+    interceptor.packet_callback = packet_callback;
+  });
 }
 
 void TracingMuxerImpl::ActivateTriggers(
@@ -1629,7 +1629,8 @@
 bool TracingMuxerImpl::FlushDataSource_AsyncBegin(
     TracingBackendId backend_id,
     DataSourceInstanceID instance_id,
-    FlushRequestID flush_id) {
+    FlushRequestID flush_id,
+    FlushFlags flush_flags) {
   PERFETTO_DLOG("Flushing data source %" PRIu64, instance_id);
   auto ds = FindDataSource(backend_id, instance_id);
   if (!ds) {
@@ -1640,6 +1641,7 @@
   uint32_t backend_connection_id = ds.internal_state->backend_connection_id;
 
   FlushArgsImpl flush_args;
+  flush_args.flush_flags = flush_flags;
   flush_args.internal_instance_index = ds.instance_idx;
   flush_args.async_flush_closure = [this, backend_id, backend_connection_id,
                                     instance_id, ds, flush_id] {
@@ -1924,7 +1926,11 @@
     return;
   }
 
-  consumer->service_->Flush(timeout_ms, std::move(callback));
+  // For now we don't want to expose the flush reason to the consumer-side SDK
+  // users to avoid misuse until there is a strong need.
+  consumer->service_->Flush(timeout_ms, std::move(callback),
+                            FlushFlags(FlushFlags::Initiator::kConsumerSdk,
+                                       FlushFlags::Reason::kExplicit));
 }
 
 void TracingMuxerImpl::StopTracingSession(TracingSessionGlobalID session_id) {
diff --git a/src/tracing/internal/tracing_muxer_impl.h b/src/tracing/internal/tracing_muxer_impl.h
index 220f96b..ab132f3 100644
--- a/src/tracing/internal/tracing_muxer_impl.h
+++ b/src/tracing/internal/tracing_muxer_impl.h
@@ -228,7 +228,10 @@
     void StartDataSource(DataSourceInstanceID,
                          const DataSourceConfig&) override;
     void StopDataSource(DataSourceInstanceID) override;
-    void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
+    void Flush(FlushRequestID,
+               const DataSourceInstanceID*,
+               size_t,
+               FlushFlags) override;
     void ClearIncrementalState(const DataSourceInstanceID*, size_t) override;
 
     bool SweepDeadServices();
@@ -511,7 +514,8 @@
                                const FindDataSourceRes&);
   bool FlushDataSource_AsyncBegin(TracingBackendId,
                                   DataSourceInstanceID,
-                                  FlushRequestID);
+                                  FlushRequestID,
+                                  FlushFlags);
   void FlushDataSource_AsyncEnd(TracingBackendId,
                                 uint32_t backend_connection_id,
                                 DataSourceInstanceID,
diff --git a/src/tracing/ipc/consumer/consumer_ipc_client_impl.cc b/src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
index cf34361..a729d3b 100644
--- a/src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
+++ b/src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
@@ -221,7 +221,9 @@
   consumer_port_.FreeBuffers(req, std::move(async_response));
 }
 
-void ConsumerIPCClientImpl::Flush(uint32_t timeout_ms, FlushCallback callback) {
+void ConsumerIPCClientImpl::Flush(uint32_t timeout_ms,
+                                  FlushCallback callback,
+                                  FlushFlags flush_flags) {
   if (!connected_) {
     PERFETTO_DLOG("Cannot Flush(), not connected to tracing service");
     return callback(/*success=*/false);
@@ -229,6 +231,7 @@
 
   protos::gen::FlushRequest req;
   req.set_timeout_ms(static_cast<uint32_t>(timeout_ms));
+  req.set_flags(flush_flags.flags());
   ipc::Deferred<protos::gen::FlushResponse> async_response;
   async_response.Bind(
       [callback](ipc::AsyncResult<protos::gen::FlushResponse> response) {
diff --git a/src/tracing/ipc/consumer/consumer_ipc_client_impl.h b/src/tracing/ipc/consumer/consumer_ipc_client_impl.h
index 71f4968..eba824f 100644
--- a/src/tracing/ipc/consumer/consumer_ipc_client_impl.h
+++ b/src/tracing/ipc/consumer/consumer_ipc_client_impl.h
@@ -66,7 +66,7 @@
   void DisableTracing() override;
   void ReadBuffers() override;
   void FreeBuffers() override;
-  void Flush(uint32_t timeout_ms, FlushCallback) override;
+  void Flush(uint32_t timeout_ms, FlushCallback, FlushFlags) override;
   void Detach(const std::string& key) override;
   void Attach(const std::string& key) override;
   void GetTraceStats() override;
diff --git a/src/tracing/ipc/producer/producer_ipc_client_impl.cc b/src/tracing/ipc/producer/producer_ipc_client_impl.cc
index 6caf224..ea87953 100644
--- a/src/tracing/ipc/producer/producer_ipc_client_impl.cc
+++ b/src/tracing/ipc/producer/producer_ipc_client_impl.cc
@@ -374,10 +374,12 @@
     const auto* data_source_ids = cmd.flush().data_source_ids().data();
     static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
                   "data_source_ids should be 64-bit");
+
+    FlushFlags flags(cmd.flush().flags());
     producer_->Flush(
         cmd.flush().request_id(),
         reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
-        static_cast<size_t>(cmd.flush().data_source_ids().size()));
+        static_cast<size_t>(cmd.flush().data_source_ids().size()), flags);
     return;
   }
 
diff --git a/src/tracing/ipc/service/consumer_ipc_service.cc b/src/tracing/ipc/service/consumer_ipc_service.cc
index ac52864..3e01316 100644
--- a/src/tracing/ipc/service/consumer_ipc_service.cc
+++ b/src/tracing/ipc/service/consumer_ipc_service.cc
@@ -129,8 +129,9 @@
     if (weak_this)
       weak_this->OnFlushCallback(success, std::move(it));
   };
-  GetConsumerForCurrentRequest()->service_endpoint->Flush(req.timeout_ms(),
-                                                          std::move(callback));
+  FlushFlags flags(req.flags());
+  GetConsumerForCurrentRequest()->service_endpoint->Flush(
+      req.timeout_ms(), std::move(callback), flags);
 }
 
 // Called by the IPC layer.
diff --git a/src/tracing/ipc/service/producer_ipc_service.cc b/src/tracing/ipc/service/producer_ipc_service.cc
index d13130a..3208096 100644
--- a/src/tracing/ipc/service/producer_ipc_service.cc
+++ b/src/tracing/ipc/service/producer_ipc_service.cc
@@ -506,7 +506,8 @@
 void ProducerIPCService::RemoteProducer::Flush(
     FlushRequestID flush_request_id,
     const DataSourceInstanceID* data_source_ids,
-    size_t num_data_sources) {
+    size_t num_data_sources,
+    FlushFlags flush_flags) {
   if (!async_producer_commands.IsBound()) {
     PERFETTO_DLOG(
         "The Service tried to request a flush but the remote Producer has not "
@@ -518,6 +519,7 @@
   for (size_t i = 0; i < num_data_sources; i++)
     cmd->mutable_flush()->add_data_source_ids(data_source_ids[i]);
   cmd->mutable_flush()->set_request_id(flush_request_id);
+  cmd->mutable_flush()->set_flags(flush_flags.flags());
   async_producer_commands.Resolve(std::move(cmd));
 }
 
diff --git a/src/tracing/ipc/service/producer_ipc_service.h b/src/tracing/ipc/service/producer_ipc_service.h
index b42ed73..dffba9d 100644
--- a/src/tracing/ipc/service/producer_ipc_service.h
+++ b/src/tracing/ipc/service/producer_ipc_service.h
@@ -93,7 +93,8 @@
     void OnTracingSetup() override;
     void Flush(FlushRequestID,
                const DataSourceInstanceID* data_source_ids,
-               size_t num_data_sources) override;
+               size_t num_data_sources,
+               FlushFlags) override;
 
     void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
                                size_t num_data_sources) override;
diff --git a/src/tracing/test/api_integrationtest.cc b/src/tracing/test/api_integrationtest.cc
index a363089..3ba6aab 100644
--- a/src/tracing/test/api_integrationtest.cc
+++ b/src/tracing/test/api_integrationtest.cc
@@ -282,7 +282,7 @@
   bool handle_flush_asynchronously = false;
   std::function<void()> on_start_callback;
   std::function<void()> on_stop_callback;
-  std::function<void()> on_flush_callback;
+  std::function<void(perfetto::FlushFlags)> on_flush_callback;
   std::function<void()> async_stop_closure;
   std::function<void()> async_flush_closure;
 };
@@ -949,8 +949,9 @@
   EXPECT_NE(handle_, nullptr);
   if (handle_->handle_flush_asynchronously)
     handle_->async_flush_closure = args.HandleFlushAsynchronously();
-  if (handle_->on_flush_callback)
-    handle_->on_flush_callback();
+  if (handle_->on_flush_callback) {
+    handle_->on_flush_callback(args.flush_flags);
+  }
   handle_->on_flush.Notify();
 }
 
@@ -4224,8 +4225,11 @@
   WaitableTestEvent producer_on_flush;
   WaitableTestEvent consumer_flush_done;
 
-  data_source->on_flush_callback = [&] {
+  data_source->on_flush_callback = [&](perfetto::FlushFlags flush_flags) {
     EXPECT_FALSE(consumer_flush_done.notified());
+    EXPECT_EQ(flush_flags.initiator(),
+              perfetto::FlushFlags::Initiator::kConsumerSdk);
+    EXPECT_EQ(flush_flags.reason(), perfetto::FlushFlags::Reason::kExplicit);
     producer_on_flush.Notify();
     MockDataSource::Trace([](MockDataSource::TraceContext ctx) {
       ctx.NewTracePacket()->set_for_testing()->set_str("on-flush");
@@ -4275,7 +4279,7 @@
   WaitableTestEvent consumer_flush_done;
 
   data_source->handle_flush_asynchronously = true;
-  data_source->on_flush_callback = [&] {
+  data_source->on_flush_callback = [&](perfetto::FlushFlags) {
     EXPECT_FALSE(consumer_flush_done.notified());
   };
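A minimal sketch of how a producer-side data source can observe these flags, assuming the SDK's DataSourceBase::OnFlush(const FlushArgs&) hook and that FlushArgs carries the flush_flags value populated via FlushArgsImpl above. The class name and packet payload are hypothetical, and the usual data-source registration boilerplate is omitted:

    #include "perfetto/tracing.h"

    // Hypothetical data source, for illustration only.
    class MyDataSource : public perfetto::DataSource<MyDataSource> {
     public:
      void OnFlush(const FlushArgs& args) override {
        // args.flush_flags is the value plumbed through FlushArgsImpl above.
        if (args.flush_flags.reason() ==
            perfetto::FlushFlags::Reason::kTraceClone) {
          // E.g. emit one final packet before the buffers are cloned, similar to
          // the "on-flush" / "after_clone" packets written in the tests.
          MyDataSource::Trace([](MyDataSource::TraceContext ctx) {
            ctx.NewTracePacket()->set_for_testing()->set_str("pre-clone");
          });
        }
      }
    };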
 
diff --git a/src/tracing/test/mock_consumer.cc b/src/tracing/test/mock_consumer.cc
index e24ffcc..6601681 100644
--- a/src/tracing/test/mock_consumer.cc
+++ b/src/tracing/test/mock_consumer.cc
@@ -91,15 +91,19 @@
   task_runner_->RunUntilCheckpoint(checkpoint_name, timeout_ms);
 }
 
-MockConsumer::FlushRequest MockConsumer::Flush(uint32_t timeout_ms) {
+MockConsumer::FlushRequest MockConsumer::Flush(uint32_t timeout_ms,
+                                               FlushFlags flush_flags) {
   static int i = 0;
   auto checkpoint_name = "on_consumer_flush_" + std::to_string(i++);
   auto on_flush = task_runner_->CreateCheckpoint(checkpoint_name);
   std::shared_ptr<bool> result(new bool());
-  service_endpoint_->Flush(timeout_ms, [result, on_flush](bool success) {
-    *result = success;
-    on_flush();
-  });
+  service_endpoint_->Flush(
+      timeout_ms,
+      [result, on_flush](bool success) {
+        *result = success;
+        on_flush();
+      },
+      flush_flags);
 
   base::TestTaskRunner* task_runner = task_runner_;
   auto wait_for_flush_completion = [result, task_runner,
diff --git a/src/tracing/test/mock_consumer.h b/src/tracing/test/mock_consumer.h
index 2253d93..b7d8d0a 100644
--- a/src/tracing/test/mock_consumer.h
+++ b/src/tracing/test/mock_consumer.h
@@ -56,7 +56,10 @@
   void DisableTracing();
   void FreeBuffers();
   void WaitForTracingDisabled(uint32_t timeout_ms = 3000);
-  FlushRequest Flush(uint32_t timeout_ms = 10000);
+  FlushRequest Flush(
+      uint32_t timeout_ms = 10000,
+      FlushFlags = FlushFlags(FlushFlags::Initiator::kConsumerSdk,
+                              FlushFlags::Reason::kExplicit));
   std::vector<protos::gen::TracePacket> ReadBuffers();
   void GetTraceStats();
   TraceStats WaitForTraceStats(bool success);
diff --git a/src/tracing/test/mock_producer.cc b/src/tracing/test/mock_producer.cc
index 84ca4ee..ef40dd6 100644
--- a/src/tracing/test/mock_producer.cc
+++ b/src/tracing/test/mock_producer.cc
@@ -192,23 +192,32 @@
   return service_endpoint_->CreateTraceWriter(buf_id);
 }
 
-void MockProducer::ExpectFlush(TraceWriter* writer_to_flush, bool reply) {
+void MockProducer::ExpectFlush(TraceWriter* writer_to_flush,
+                               bool reply,
+                               FlushFlags expected_flags) {
   std::vector<TraceWriter*> writers;
   if (writer_to_flush)
     writers.push_back(writer_to_flush);
-  ExpectFlush(writers, reply);
+  ExpectFlush(writers, reply, expected_flags);
 }
 
 void MockProducer::ExpectFlush(std::vector<TraceWriter*> writers_to_flush,
-                               bool reply) {
-  auto& expected_call = EXPECT_CALL(*this, Flush(_, _, _));
-  expected_call.WillOnce(Invoke(
-      [this, writers_to_flush, reply](FlushRequestID flush_req_id,
-                                      const DataSourceInstanceID*, size_t) {
-        for (auto* writer : writers_to_flush)
+                               bool reply,
+                               FlushFlags expected_flags) {
+  auto& expected_call = EXPECT_CALL(*this, Flush(_, _, _, _));
+  expected_call.WillOnce(
+      Invoke([this, writers_to_flush, reply, expected_flags](
+                 FlushRequestID flush_req_id, const DataSourceInstanceID*,
+                 size_t, FlushFlags actual_flags) {
+        if (expected_flags.flags()) {
+          EXPECT_EQ(actual_flags, expected_flags);
+        }
+        for (auto* writer : writers_to_flush) {
           writer->Flush();
-        if (reply)
+        }
+        if (reply) {
           service_endpoint_->NotifyFlushComplete(flush_req_id);
+        }
       }));
 }
 
diff --git a/src/tracing/test/mock_producer.h b/src/tracing/test/mock_producer.h
index 932bd9c..d259a18 100644
--- a/src/tracing/test/mock_producer.h
+++ b/src/tracing/test/mock_producer.h
@@ -76,10 +76,13 @@
 
   // Expect a flush. Flushes |writer_to_flush| if non-null. If |reply| is true,
   // replies to the flush request, otherwise ignores it and doesn't reply.
-  void ExpectFlush(TraceWriter* writer_to_flush, bool reply = true);
+  void ExpectFlush(TraceWriter* writer_to_flush,
+                   bool reply = true,
+                   FlushFlags expected_flags = FlushFlags());
   // Same as above, but with a vector of writers.
   void ExpectFlush(std::vector<TraceWriter*> writers_to_flush,
-                   bool reply = true);
+                   bool reply = true,
+                   FlushFlags expected_flags = FlushFlags());
 
   TracingService::ProducerEndpoint* endpoint() {
     return service_endpoint_.get();
@@ -100,7 +103,7 @@
   MOCK_METHOD(void, OnTracingSetup, (), (override));
   MOCK_METHOD(void,
               Flush,
-              (FlushRequestID, const DataSourceInstanceID*, size_t),
+              (FlushRequestID, const DataSourceInstanceID*, size_t, FlushFlags),
               (override));
   MOCK_METHOD(void,
               ClearIncrementalState,
diff --git a/src/tracing/test/tracing_integration_test.cc b/src/tracing/test/tracing_integration_test.cc
index 29949a8..3d48453 100644
--- a/src/tracing/test/tracing_integration_test.cc
+++ b/src/tracing/test/tracing_integration_test.cc
@@ -73,7 +73,7 @@
   MOCK_METHOD(void, OnTracingSetup, (), (override));
   MOCK_METHOD(void,
               Flush,
-              (FlushRequestID, const DataSourceInstanceID*, size_t),
+              (FlushRequestID, const DataSourceInstanceID*, size_t, FlushFlags),
               (override));
   MOCK_METHOD(void,
               ClearIncrementalState,
@@ -525,13 +525,18 @@
   // Ask the service to flush, but don't flush our trace writer. This should
   // cause our uncommitted SMB chunk to be scraped.
   auto on_flush_complete = task_runner_->CreateCheckpoint("on_flush_complete");
-  consumer_endpoint_->Flush(5000, [on_flush_complete](bool success) {
-    EXPECT_TRUE(success);
-    on_flush_complete();
-  });
-  EXPECT_CALL(producer_, Flush(_, _, _))
+  FlushFlags flush_flags(FlushFlags::Initiator::kConsumerSdk,
+                         FlushFlags::Reason::kExplicit);
+  consumer_endpoint_->Flush(
+      5000,
+      [on_flush_complete](bool success) {
+        EXPECT_TRUE(success);
+        on_flush_complete();
+      },
+      flush_flags);
+  EXPECT_CALL(producer_, Flush(_, _, _, flush_flags))
       .WillOnce(Invoke([this](FlushRequestID flush_req_id,
-                              const DataSourceInstanceID*, size_t) {
+                              const DataSourceInstanceID*, size_t, FlushFlags) {
         producer_endpoint_->NotifyFlushComplete(flush_req_id);
       }));
   task_runner_->RunUntilCheckpoint("on_flush_complete");
diff --git a/test/data/ui-screenshots/ui-modal_dialog_dismiss_1.png.sha256 b/test/data/ui-screenshots/ui-modal_dialog_dismiss_1.png.sha256
index d79c836..9b5f546 100644
--- a/test/data/ui-screenshots/ui-modal_dialog_dismiss_1.png.sha256
+++ b/test/data/ui-screenshots/ui-modal_dialog_dismiss_1.png.sha256
@@ -1 +1 @@
-38a08a5232880f0c09e62f837a3839fe056c99853817b57dd98a0a5a0b9d6ee9
\ No newline at end of file
+846bdc1f6e20082da46d483049322300d91cb38ef72f922c934d46cc5df3507d
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256 b/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256
index a5e428b..b0f92e9 100644
--- a/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256
+++ b/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256
@@ -1 +1 @@
-03ad81a20b383b8d73ab66e50c64b38932ebff83b686f5e96874621d4b17e3e7
\ No newline at end of file
+3c80ba72b9bd0454af4aac3352c4e8f855f48feeba53d2a5ac7566333b4cf763
\ No newline at end of file
diff --git a/test/end_to_end_shared_memory_fuzzer.cc b/test/end_to_end_shared_memory_fuzzer.cc
index adde520..05f1eab 100644
--- a/test/end_to_end_shared_memory_fuzzer.cc
+++ b/test/end_to_end_shared_memory_fuzzer.cc
@@ -96,7 +96,10 @@
 
   void StopDataSource(DataSourceInstanceID) override {}
   void OnTracingSetup() override {}
-  void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override {}
+  void Flush(FlushRequestID,
+             const DataSourceInstanceID*,
+             size_t,
+             FlushFlags) override {}
   void ClearIncrementalState(const DataSourceInstanceID*, size_t) override {}
 
  private:
diff --git a/test/fake_producer.cc b/test/fake_producer.cc
index 90c3932..46308f5 100644
--- a/test/fake_producer.cc
+++ b/test/fake_producer.cc
@@ -160,7 +160,8 @@
 
 void FakeProducer::Flush(FlushRequestID flush_request_id,
                          const DataSourceInstanceID*,
-                         size_t num_data_sources) {
+                         size_t num_data_sources,
+                         FlushFlags) {
   PERFETTO_DCHECK(num_data_sources > 0);
   if (trace_writer_)
     trace_writer_->Flush();
diff --git a/test/fake_producer.h b/test/fake_producer.h
index 5922165..0b16590 100644
--- a/test/fake_producer.h
+++ b/test/fake_producer.h
@@ -79,7 +79,10 @@
                        const DataSourceConfig& source_config) override;
   void StopDataSource(DataSourceInstanceID) override;
   void OnTracingSetup() override;
-  void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
+  void Flush(FlushRequestID,
+             const DataSourceInstanceID*,
+             size_t,
+             FlushFlags) override;
   void ClearIncrementalState(const DataSourceInstanceID* /*data_source_ids*/,
                              size_t /*num_data_sources*/) override {}
 
diff --git a/test/test_helper.cc b/test/test_helper.cc
index 6054f3f..f991837 100644
--- a/test/test_helper.cc
+++ b/test/test_helper.cc
@@ -209,11 +209,12 @@
   endpoint_->DisableTracing();
 }
 
-void TestHelper::FlushAndWait(uint32_t timeout_ms) {
+void TestHelper::FlushAndWait(uint32_t timeout_ms, FlushFlags flush_flags) {
   static int flush_num = 0;
   std::string checkpoint_name = "flush." + std::to_string(flush_num++);
   auto checkpoint = CreateCheckpoint(checkpoint_name);
-  endpoint_->Flush(timeout_ms, [checkpoint](bool) { checkpoint(); });
+  endpoint_->Flush(
+      timeout_ms, [checkpoint](bool) { checkpoint(); }, flush_flags);
   RunUntilCheckpoint(checkpoint_name, timeout_ms + 1000);
 }
 
diff --git a/test/test_helper.h b/test/test_helper.h
index ba00422..91b955d 100644
--- a/test/test_helper.h
+++ b/test/test_helper.h
@@ -314,7 +314,7 @@
   void StartTracing(const TraceConfig& config,
                     base::ScopedFile = base::ScopedFile());
   void DisableTracing();
-  void FlushAndWait(uint32_t timeout_ms);
+  void FlushAndWait(uint32_t timeout_ms, FlushFlags = FlushFlags());
   void ReadData(uint32_t read_count = 0);
   void FreeBuffers();
   void DetachConsumer(const std::string& key);
diff --git a/test/trace_processor/diff_tests/android/android_anr_metric.out b/test/trace_processor/diff_tests/android/android_anr_metric.out
new file mode 100644
index 0000000..306d438
--- /dev/null
+++ b/test/trace_processor/diff_tests/android/android_anr_metric.out
@@ -0,0 +1,20 @@
+android_anr {
+    anr {
+        process_name: "com.google.android.app1"
+        pid: 11167
+        ts: 1000
+        subject: "Test ANR subject 1"
+        error_id: "da24554c-452a-4ae1-b74a-fb898f6e0982"
+    }
+    anr {
+        process_name: "com.google.android.app2"
+        ts: 2000
+        subject: "Test ANR subject 2"
+        error_id: "8612fece-c2f1-4aeb-9d45-8e6d9d0201cf"
+    }
+    anr {
+        process_name: "com.google.android.app3"
+        ts: 3000
+        error_id: "c25916a0-a8f0-41f3-87df-319e06471a0f"
+    }
+}
\ No newline at end of file
diff --git a/test/trace_processor/diff_tests/android/android_anr_metric.py b/test/trace_processor/diff_tests/android/android_anr_metric.py
new file mode 100644
index 0000000..9b624b2
--- /dev/null
+++ b/test/trace_processor/diff_tests/android/android_anr_metric.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import sys
+import synth_common
+
+from google.protobuf import text_format
+
+SS_PID = 1234
+
+trace = synth_common.create_trace()
+
+trace.add_packet()
+trace.add_process(pid=SS_PID, ppid=1, cmdline="system_server", uid=10001)
+
+# Add first ANR.
+trace.add_ftrace_packet(cpu=0)
+trace.add_atrace_counter(
+    ts=1000,
+    pid=SS_PID,
+    tid=SS_PID,
+    buf="ErrorId:com.google.android.app1 11167#da24554c-452a-4ae1-b74a-fb898f6e0982",
+    cnt=1)
+
+trace.add_ftrace_packet(cpu=0)
+trace.add_atrace_counter(
+    ts=1001,
+    tid=SS_PID,
+    pid=SS_PID,
+    buf="Subject(for ErrorId da24554c-452a-4ae1-b74a-fb898f6e0982):Test ANR subject 1",
+    cnt=1)
+
+# Add second ANR.
+# Does not include PID
+trace.add_ftrace_packet(cpu=0)
+trace.add_atrace_counter(
+    ts=2000,
+    pid=SS_PID,
+    tid=SS_PID,
+    buf="ErrorId:com.google.android.app2#8612fece-c2f1-4aeb-9d45-8e6d9d0201cf",
+    cnt=1)
+
+trace.add_ftrace_packet(cpu=0)
+trace.add_atrace_counter(
+    ts=2001,
+    tid=SS_PID,
+    pid=SS_PID,
+    buf="Subject(for ErrorId 8612fece-c2f1-4aeb-9d45-8e6d9d0201cf):Test ANR subject 2",
+    cnt=1)
+
+# Add third ANR.
+# Does not include PID or subject
+trace.add_ftrace_packet(cpu=0)
+trace.add_atrace_counter(
+    ts=3000,
+    pid=SS_PID,
+    tid=SS_PID,
+    buf="ErrorId:com.google.android.app3#c25916a0-a8f0-41f3-87df-319e06471a0f",
+    cnt=1)
+
+sys.stdout.buffer.write(trace.trace.SerializeToString())
diff --git a/test/trace_processor/diff_tests/android/tests.py b/test/trace_processor/diff_tests/android/tests.py
index 0565554..f05f9b1 100644
--- a/test/trace_processor/diff_tests/android/tests.py
+++ b/test/trace_processor/diff_tests/android/tests.py
@@ -277,6 +277,12 @@
         """,
         out=Path('android_network_activity.out'))
 
+  def test_anr_metric(self):
+    return DiffTestBlueprint(
+        trace=Path('android_anr_metric.py'),
+        query=Metric('android_anr'),
+        out=Path('android_anr_metric.out'))
+
   def test_binder_sync_binder_metrics(self):
     return DiffTestBlueprint(
         trace=DataPath('android_binder_metric_trace.atr'),
diff --git a/test/trace_processor/diff_tests/parsing/tests.py b/test/trace_processor/diff_tests/parsing/tests.py
index 30aea94..83a986c 100644
--- a/test/trace_processor/diff_tests/parsing/tests.py
+++ b/test/trace_processor/diff_tests/parsing/tests.py
@@ -833,14 +833,14 @@
             "pid": 1,
             "tid": 1,
             "ph": "B",
-            "ts": 1597071955492308000
+            "ts": 1597071955492308
           },
           {
             "name": "add_graph",
             "pid": 1,
             "tid": 1,
             "ph": "E",
-            "ts": 1597071955703771000
+            "ts": 1597071955703771
           }
         ]
         }
@@ -850,7 +850,7 @@
         """,
         out=Csv("""
         "ts","dur","name"
-        -7794778920422990592,211463000000,"add_graph"
+        1597071955492308000,211463000,"add_graph"
         """))
 
   # Parsing sched_blocked_reason
diff --git a/test/trace_processor/diff_tests/tables/tests_sched.py b/test/trace_processor/diff_tests/tables/tests_sched.py
index f6fc2ef..216c2e8 100644
--- a/test/trace_processor/diff_tests/tables/tests_sched.py
+++ b/test/trace_processor/diff_tests/tables/tests_sched.py
@@ -507,3 +507,69 @@
         1735489999987,45838,158,1,"init","/system/bin/init","traced_probes","/system/bin/traced_probes",4178,"S","[NULL]",30,0,1737061943856,1572057416,"S","[NULL]"
         1735490039439,570799,544,527,"adbd","/apex/com.android.adbd/bin/adbd","init","/system/bin/init","[NULL]","[NULL]","[NULL]",0,1,1735490039439,"[NULL]","[NULL]","[NULL]"
         """))
+
+  def test_thread_executing_span_critical_path_thread_states(self):
+    return DiffTestBlueprint(
+        trace=DataPath('sched_wakeup_trace.atr'),
+        query="""
+        SELECT IMPORT('experimental.thread_executing_span');
+        SELECT
+          ts,
+          dur,
+          tid,
+          pid,
+          thread_name,
+          process_name,
+          state,
+          blocked_function,
+          height
+        FROM experimental_thread_executing_span_critical_path_thread_states(257)
+        ORDER BY ts
+        LIMIT 10
+        """,
+        out=Csv("""
+        "ts","dur","tid","pid","thread_name","process_name","state","blocked_function","height"
+        1736109621029,34116,1469,1469,"m.android.phone","com.android.phone","R","[NULL]",0
+        1736109655145,680044,1469,1469,"m.android.phone","com.android.phone","Running","[NULL]",0
+        1736110335189,83413,657,642,"binder:642_1","system_server","R","[NULL]",1
+        1736110418602,492287,657,642,"binder:642_1","system_server","Running","[NULL]",1
+        1736110910889,122878,1469,1469,"m.android.phone","com.android.phone","R","[NULL]",0
+        1736111033767,282646,1469,1469,"m.android.phone","com.android.phone","Running","[NULL]",0
+        1736111316413,19907,657,642,"binder:642_1","system_server","R","[NULL]",1
+        1736111336320,370659,657,642,"binder:642_1","system_server","Running","[NULL]",1
+        1736111706979,44391,1469,1469,"m.android.phone","com.android.phone","R","[NULL]",0
+        1736111751370,143860,1469,1469,"m.android.phone","com.android.phone","Running","[NULL]",0
+        """))
+
+  def test_thread_executing_span_critical_path_slices(self):
+    return DiffTestBlueprint(
+        trace=DataPath('sched_wakeup_trace.atr'),
+        query="""
+        SELECT IMPORT('experimental.thread_executing_span');
+        SELECT
+          ts,
+          dur,
+          tid,
+          pid,
+          thread_name,
+          process_name,
+          slice_name,
+          slice_depth,
+          height
+        FROM experimental_thread_executing_span_critical_path_slices(257)
+        ORDER BY ts
+        LIMIT 10
+        """,
+        out=Csv("""
+        "ts","dur","tid","pid","thread_name","process_name","slice_name","slice_depth","height"
+        1736110278076,57113,1469,1469,"m.android.phone","com.android.phone","binder transaction",0,0
+        1736110435876,462664,657,642,"binder:642_1","system_server","binder reply",0,1
+        1736110692464,135281,657,642,"binder:642_1","system_server","AIDL::java::INetworkStatsService::getMobileIfaces::server",1,1
+        1736110910889,132674,1469,1469,"m.android.phone","com.android.phone","binder transaction",0,0
+        1736111274404,42009,1469,1469,"m.android.phone","com.android.phone","binder transaction",0,0
+        1736111340019,361607,657,642,"binder:642_1","system_server","binder reply",0,1
+        1736111417370,249758,657,642,"binder:642_1","system_server","AIDL::java::INetworkStatsService::getIfaceStats::server",1,1
+        1736111706979,48463,1469,1469,"m.android.phone","com.android.phone","binder transaction",0,0
+        1736111874030,21200,1469,1469,"m.android.phone","com.android.phone","binder transaction",0,0
+        1736111923740,159330,657,642,"binder:642_1","system_server","binder reply",0,1
+        """))
diff --git a/tools/check_sql_modules.py b/tools/check_sql_modules.py
index 542d2a7..4ffa402 100755
--- a/tools/check_sql_modules.py
+++ b/tools/check_sql_modules.py
@@ -73,6 +73,14 @@
       modules.append((path, sql, parsed))
 
   for path, sql, parsed in modules:
+    lines = [l.strip() for l in sql.split('\n')]
+    for line in lines:
+      if line.startswith('--'):
+        continue
+      if 'RUN_METRIC' in line:
+        errors.append(f"RUN_METRIC is banned in standard library.\n"
+                      f"Offending file: {path}\n")
+
     errors += parsed.errors
     errors += check_banned_words(sql, path)
     errors += check_banned_create_table_as(sql,
diff --git a/ui/release/channels.json b/ui/release/channels.json
index dd3a4fb..d8b0bf8 100644
--- a/ui/release/channels.json
+++ b/ui/release/channels.json
@@ -6,7 +6,7 @@
     },
     {
       "name": "canary",
-      "rev": "7254c51a1d5a5a81aeb7c790d2bac38af162f6d7"
+      "rev": "c69b33b9abcc20fad9ad5f39de883216e4b43130"
     },
     {
       "name": "autopush",
diff --git a/ui/src/frontend/home_page.ts b/ui/src/frontend/home_page.ts
index bbe09d7..a45737b 100644
--- a/ui/src/frontend/home_page.ts
+++ b/ui/src/frontend/home_page.ts
@@ -42,9 +42,9 @@
               m(Anchor,
                 {
                   href:
-                      'https://perfetto.dev/docs/visualization/perfetto-ui#command-pallete',
+                      'https://perfetto.dev/docs/visualization/perfetto-ui#command-palette',
                 },
-                'command pallete,'),
+                'command palette,'),
               ' press ',
               m(HotkeyGlyphs, {hotkey: '!Mod+Shift+P'}),
               '.'),