Merge "protozero: allow top-level extension blocks" into main
diff --git a/Android.bp b/Android.bp
index 6cb0053..3450abc 100644
--- a/Android.bp
+++ b/Android.bp
@@ -11304,6 +11304,7 @@
     name: "perfetto_src_trace_processor_containers_unittests",
     srcs: [
         "src/trace_processor/containers/bit_vector_unittest.cc",
+        "src/trace_processor/containers/implicit_segment_forest_unittest.cc",
         "src/trace_processor/containers/null_term_string_view_unittest.cc",
         "src/trace_processor/containers/row_map_unittest.cc",
         "src/trace_processor/containers/string_pool_unittest.cc",
@@ -12485,6 +12486,10 @@
         "src/trace_processor/perfetto_sql/stdlib/stack_trace/jit.sql",
         "src/trace_processor/perfetto_sql/stdlib/time/conversion.sql",
         "src/trace_processor/perfetto_sql/stdlib/v8/jit.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/processes.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/slices.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/threads.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/tracks.sql",
         "src/trace_processor/perfetto_sql/stdlib/wattson/arm_dsu.sql",
         "src/trace_processor/perfetto_sql/stdlib/wattson/cpu_freq.sql",
         "src/trace_processor/perfetto_sql/stdlib/wattson/cpu_idle.sql",
@@ -12891,6 +12896,7 @@
 filegroup {
     name: "perfetto_src_trace_redaction_trace_redaction",
     srcs: [
+        "src/trace_redaction/collect_frame_cookies.cc",
         "src/trace_redaction/collect_timeline_events.cc",
         "src/trace_redaction/filter_ftrace_using_allowlist.cc",
         "src/trace_redaction/filter_packet_using_allowlist.cc",
@@ -12921,6 +12927,7 @@
 filegroup {
     name: "perfetto_src_trace_redaction_unittests",
     srcs: [
+        "src/trace_redaction/collect_frame_cookies_unittest.cc",
         "src/trace_redaction/collect_timeline_events_unittest.cc",
         "src/trace_redaction/filter_ftrace_using_allowlist_unittest.cc",
         "src/trace_redaction/filter_packet_using_allowlist_unittest.cc",
diff --git a/BUILD b/BUILD
index 12fe725..389e3b8 100644
--- a/BUILD
+++ b/BUILD
@@ -1366,6 +1366,7 @@
         ":include_perfetto_public_base",
         ":include_perfetto_public_protozero",
         "src/trace_processor/containers/bit_vector.h",
+        "src/trace_processor/containers/implicit_segment_forest.h",
         "src/trace_processor/containers/null_term_string_view.h",
         "src/trace_processor/containers/row_map.h",
         "src/trace_processor/containers/row_map_algorithms.h",
@@ -2620,6 +2621,17 @@
     ],
 )
 
+# GN target: //src/trace_processor/perfetto_sql/stdlib/viz/summary:summary
+perfetto_filegroup(
+    name = "src_trace_processor_perfetto_sql_stdlib_viz_summary_summary",
+    srcs = [
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/processes.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/slices.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/threads.sql",
+        "src/trace_processor/perfetto_sql/stdlib/viz/summary/tracks.sql",
+    ],
+)
+
 # GN target: //src/trace_processor/perfetto_sql/stdlib/wattson:wattson
 perfetto_filegroup(
     name = "src_trace_processor_perfetto_sql_stdlib_wattson_wattson",
@@ -2655,6 +2667,7 @@
         ":src_trace_processor_perfetto_sql_stdlib_stack_trace_stack_trace",
         ":src_trace_processor_perfetto_sql_stdlib_time_time",
         ":src_trace_processor_perfetto_sql_stdlib_v8_v8",
+        ":src_trace_processor_perfetto_sql_stdlib_viz_summary_summary",
         ":src_trace_processor_perfetto_sql_stdlib_wattson_wattson",
     ],
     outs = [
diff --git a/src/protozero/protoc_plugin/protozero_plugin.cc b/src/protozero/protoc_plugin/protozero_plugin.cc
index 2c6e62d..26f3110 100644
--- a/src/protozero/protoc_plugin/protozero_plugin.cc
+++ b/src/protozero/protoc_plugin/protozero_plugin.cc
@@ -764,8 +764,9 @@
     }
     // Iterate over all fields in "extend" blocks.
     for (int i = 0; i < message->extension_range_count(); ++i) {
-      const Descriptor::ExtensionRange* range = message->extension_range(i);
-      int candidate = range->end - 1;
+      Descriptor::ExtensionRange::Proto range;
+      message->extension_range(i)->CopyTo(&range);
+      int candidate = range.end() - 1;
       if (candidate > kMaxDecoderFieldId)
         continue;
       max_field_id = std::max(max_field_id, candidate);
diff --git a/src/trace_processor/containers/BUILD.gn b/src/trace_processor/containers/BUILD.gn
index 43e9603..c724df8 100644
--- a/src/trace_processor/containers/BUILD.gn
+++ b/src/trace_processor/containers/BUILD.gn
@@ -22,6 +22,7 @@
 perfetto_component("containers") {
   public = [
     "bit_vector.h",
+    "implicit_segment_forest.h",
     "null_term_string_view.h",
     "row_map.h",
     "row_map_algorithms.h",
@@ -44,6 +45,7 @@
   testonly = true
   sources = [
     "bit_vector_unittest.cc",
+    "implicit_segment_forest_unittest.cc",
     "null_term_string_view_unittest.cc",
     "row_map_unittest.cc",
     "string_pool_unittest.cc",
diff --git a/src/trace_processor/containers/implicit_segment_forest.h b/src/trace_processor/containers/implicit_segment_forest.h
new file mode 100644
index 0000000..f699181
--- /dev/null
+++ b/src/trace_processor/containers/implicit_segment_forest.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_CONTAINERS_IMPLICIT_SEGMENT_FOREST_H_
+#define SRC_TRACE_PROCESSOR_CONTAINERS_IMPLICIT_SEGMENT_FOREST_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+#include "perfetto/base/logging.h"
+
+namespace perfetto::trace_processor {
+
+// An implementation of a segment tree data structure [1] where:
+// 1) parent-child relationships are implicit, saving memory.
+// 2) the number of values is not required to be a power of two, turning the
+//    tree into a forest.
+//
+// Segment trees are a very powerful data structure allowing O(log(n)) aggregate
+// queries to be performed on an arbitrary range of elements in an array.
+// Specifically, for `T x[n]`, and an associative and commutative operation
+// AggOp (e.g. +, *, min, max, etc.), segment trees can compute
+// ```
+//   T y = AggOp()(x[i], x[i + 1], x[i + 2], ..., x[j])
+// ```
+// in O(log(n)) time.
+//
+// Practically, in trace processor, this is useful for computing aggregations
+// over events in a trace. For example:
+// ```
+// struct Slice { int64_t ts; int64_t dur; };
+// struct MaxDurSlice {
+//   Slice operator()(const Slice& a, const Slice& b) {
+//     return a.dur < b.dur ? b : a;
+//   }
+// };
+// using MipMap = ImplicitSegmentForest<Slice, MaxDurSlice>;
+// ```
+// allows building a "mipmap" [2] of a track in a trace in a UI. The UI can show
+// a representation of the items in the track when very zoomed out while
+// skipping the rendering of slices which are smaller than one pixel.
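+//
+// As a rough usage sketch of the MipMap alias above (illustrative values,
+// not taken from real trace data):
+// ```
+// MipMap forest;
+// forest.Push(Slice{0, 10});
+// forest.Push(Slice{10, 5});
+// forest.Push(Slice{15, 20});
+// Slice widest = forest.Query(0, 3);  // == Slice{15, 20}
+// ```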
+//
+// The design and implementation of this class takes heavy inspiration from
+// Tristan Hume's "IForestIndex" data structure [3] as described in his blog
+// post [4].
+//
+// [1] https://en.algorithmica.org/hpc/data-structures/segment-trees/
+// [2] https://en.wikipedia.org/wiki/Mipmap
+// [3]
+// https://github.com/trishume/gigatrace/blob/dfde0d7244f356bdc9aeefb387d904dd8b09d94a/src/iforest.rs
+// [4] https://thume.ca/2021/03/14/iforests/
+template <typename T, typename AggOp>
+class ImplicitSegmentForest {
+ public:
+  // Computes the aggregation (as specified by operator() in AggOp) over all
+  // elements in the tree between the indices [start, end). Requires that
+  // start < end.
+  //
+  // Complexity:
+  // This function performs O(log(n)) operations (n = end - start).
+  //
+  // Returns:
+  //  1) values[start]: if start + 1 == end
+  //  2) AggOp()(values[start], ..., values[end - 1]) otherwise
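+  //
+  // For example, with AggOp summing integers and pushed values {3, 1, 4, 1},
+  // Query(1, 3) returns 1 + 4 == 5 and Query(2, 3) returns 4.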
+  T Query(uint32_t start, uint32_t end) const {
+    PERFETTO_DCHECK(start < end);
+
+    const uint32_t in_start = start * 2;
+    const uint32_t in_end = end * 2;
+
+    uint32_t first_skip = LargestPrefixInsideSkip(in_start, in_end);
+    T aggregated = values_[AggNode(in_start, first_skip)];
+    for (uint32_t i = in_start + first_skip; i < in_end;) {
+      uint32_t skip = LargestPrefixInsideSkip(i, in_end);
+      aggregated = AggOp()(aggregated, values_[AggNode(i, skip)]);
+      i += skip;
+    }
+    return aggregated;
+  }
+
+  // Pushes a new element to the right-most part of the tree. The index of
+  // this element can be used in future calls to |Query|.
+  void Push(T v) {
+    values_.emplace_back(std::move(v));
+
+    size_t len = values_.size();
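+    // |len| is always odd here: each Push appends the value itself (at an
+    // even index) followed by one aggregate node (at an odd index, below).
+    // __builtin_ctzl(~len) counts the trailing one-bits of |len|; minus one,
+    // this is the number of ancestor aggregates finalized by this element.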
+    auto levels_to_index = static_cast<uint32_t>(__builtin_ctzl(~len)) - 1;
+
+    size_t cur = len - 1;
+    for (uint32_t level = 0; level < levels_to_index; ++level) {
+      size_t prev_higher_level = cur - (1 << level);
+      values_[prev_higher_level] =
+          AggOp()(values_[prev_higher_level], values_[cur]);
+      cur = prev_higher_level;
+    }
+    values_.emplace_back(values_[len - (1 << levels_to_index)]);
+  }
+
+  // Returns the value at |n| in the tree: this corresponds to the |n|th
+  // element |Push|-ed into the tree.
+  const T& operator[](uint32_t n) { return values_[n * 2]; }
+
+ private:
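+  // Lsp: least-significant set bit of |x|. Msp: most-significant set bit of
+  // |x|.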
+  static uint32_t Lsp(uint32_t x) { return x & -x; }
+  static uint32_t Msp(uint32_t x) {
+    return (1u << (sizeof(x) * 8 - 1)) >> __builtin_clz(x);
+  }
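+  // Size (in node indices) of the largest power-of-two span that starts at
+  // |min|, is aligned to its own size, and does not extend past |max|.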
+  static uint32_t LargestPrefixInsideSkip(uint32_t min, uint32_t max) {
+    return Lsp(min | Msp(max - min));
+  }
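+  // Index of the node holding the aggregate over the |offset| node indices
+  // (i.e. offset / 2 elements) starting at node index |i|.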
+  static uint32_t AggNode(uint32_t i, uint32_t offset) {
+    return i + (offset >> 1) - 1;
+  }
+
+  std::vector<T> values_;
+};
+
+}  // namespace perfetto::trace_processor
+
+#endif  // SRC_TRACE_PROCESSOR_CONTAINERS_IMPLICIT_SEGMENT_FOREST_H_
diff --git a/src/trace_processor/containers/implicit_segment_forest_unittest.cc b/src/trace_processor/containers/implicit_segment_forest_unittest.cc
new file mode 100644
index 0000000..16dd262
--- /dev/null
+++ b/src/trace_processor/containers/implicit_segment_forest_unittest.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/containers/implicit_segment_forest.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto::trace_processor {
+namespace {
+
+struct Value {
+  uint32_t value;
+};
+
+struct Sum {
+  Value operator()(const Value& a, const Value& b) {
+    return Value{a.value + b.value};
+  }
+};
+
+TEST(ImplicitSegmentTree, SimpleSum) {
+  std::vector<uint32_t> res = {209, 330, 901, 3, 10, 0, 3903, 309, 490};
+
+  ImplicitSegmentForest<Value, Sum> forest;
+  for (uint32_t x : res) {
+    forest.Push(Value{x});
+  }
+
+  for (uint32_t i = 0; i < res.size(); ++i) {
+    for (uint32_t j = i + 1; j < res.size(); ++j) {
+      ASSERT_EQ(forest.Query(i, j).value,
+                std::accumulate(res.begin() + i, res.begin() + j, 0u));
+    }
+  }
+}
+
+TEST(ImplicitSegmentTree, Stress) {
+  static constexpr size_t kCount = 9249;
+  std::minstd_rand0 rng(42);
+
+  std::vector<uint32_t> res;
+  ImplicitSegmentForest<Value, Sum> forest;
+  for (uint32_t i = 0; i < kCount; ++i) {
+    res.push_back(static_cast<uint32_t>(rng()));
+    forest.Push(Value{res.back()});
+  }
+
+  for (uint32_t i = 0; i < 10000; ++i) {
+    uint32_t s = rng() % kCount;
+    uint32_t e = s + 1 + (rng() % (kCount - s));
+    ASSERT_EQ(forest.Query(s, e).value,
+              std::accumulate(res.begin() + s, res.begin() + e, 0u));
+  }
+}
+
+}  // namespace
+}  // namespace perfetto::trace_processor
diff --git a/src/trace_processor/db/column/dense_null_overlay.cc b/src/trace_processor/db/column/dense_null_overlay.cc
index e171ec0..55003fa 100644
--- a/src/trace_processor/db/column/dense_null_overlay.cc
+++ b/src/trace_processor/db/column/dense_null_overlay.cc
@@ -82,10 +82,10 @@
       case SearchValidationResult::kNoData: {
         // There is no need to search in underlying storage. It's enough to
         // intersect the |non_null_|.
-        BitVector res = non_null_->IntersectRange(in.start, in.end);
-        res.Not();
+        BitVector res = non_null_->Copy();
         res.Resize(in.end, false);
-        return RangeOrBitVector(std::move(res));
+        res.Not();
+        return RangeOrBitVector(res.IntersectRange(in.start, in.end));
       }
       case SearchValidationResult::kAllData:
         return RangeOrBitVector(in);
diff --git a/src/trace_processor/db/column/null_overlay.cc b/src/trace_processor/db/column/null_overlay.cc
index fa1e679..b1f8f45 100644
--- a/src/trace_processor/db/column/null_overlay.cc
+++ b/src/trace_processor/db/column/null_overlay.cc
@@ -129,10 +129,10 @@
       case SearchValidationResult::kNoData: {
         // There is no need to search in underlying storage. It's enough to
         // intersect the |non_null_|.
-        BitVector res = non_null_->IntersectRange(in.start, in.end);
-        res.Not();
+        BitVector res = non_null_->Copy();
         res.Resize(in.end, false);
-        return RangeOrBitVector(std::move(res));
+        res.Not();
+        return RangeOrBitVector(res.IntersectRange(in.start, in.end));
       }
       case SearchValidationResult::kAllData:
         return RangeOrBitVector(in);
diff --git a/src/trace_processor/db/column/numeric_storage.h b/src/trace_processor/db/column/numeric_storage.h
index f7cb51c..821fd40 100644
--- a/src/trace_processor/db/column/numeric_storage.h
+++ b/src/trace_processor/db/column/numeric_storage.h
@@ -100,21 +100,7 @@
     SingleSearchResult SingleSearch(FilterOp op,
                                     SqlValue sql_val,
                                     uint32_t i) const override {
-      if constexpr (std::is_same_v<T, double>) {
-        if (sql_val.type != SqlValue::kDouble) {
-          return SingleSearchResult::kNeedsFullSearch;
-        }
-        return utils::SingleSearchNumeric(op, (*vector_)[i],
-                                          sql_val.double_value);
-      } else {
-        if (sql_val.type != SqlValue::kLong ||
-            sql_val.long_value > std::numeric_limits<T>::max() ||
-            sql_val.long_value < std::numeric_limits<T>::min()) {
-          return SingleSearchResult::kNeedsFullSearch;
-        }
-        return utils::SingleSearchNumeric(op, (*vector_)[i],
-                                          static_cast<T>(sql_val.long_value));
-      }
+      return utils::SingleSearchNumeric(op, (*vector_)[i], sql_val);
     }
 
     void StableSort(SortToken* start,
diff --git a/src/trace_processor/db/column/numeric_storage_unittest.cc b/src/trace_processor/db/column/numeric_storage_unittest.cc
index 2b20306..46ef920 100644
--- a/src/trace_processor/db/column/numeric_storage_unittest.cc
+++ b/src/trace_processor/db/column/numeric_storage_unittest.cc
@@ -187,6 +187,11 @@
             SingleSearchResult::kMatch);
   ASSERT_EQ(chain->SingleSearch(FilterOp::kGe, SqlValue::Long(0), 5),
             SingleSearchResult::kNoMatch);
+
+  ASSERT_EQ(chain->SingleSearch(FilterOp::kIsNull, SqlValue(), 0),
+            SingleSearchResult::kNoMatch);
+  ASSERT_EQ(chain->SingleSearch(FilterOp::kIsNotNull, SqlValue(), 0),
+            SingleSearchResult::kMatch);
 }
 
 TEST(NumericStorage, Search) {
diff --git a/src/trace_processor/db/column/set_id_storage.cc b/src/trace_processor/db/column/set_id_storage.cc
index 6e4719f..ae1c167 100644
--- a/src/trace_processor/db/column/set_id_storage.cc
+++ b/src/trace_processor/db/column/set_id_storage.cc
@@ -72,15 +72,7 @@
 SingleSearchResult SetIdStorage::ChainImpl::SingleSearch(FilterOp op,
                                                          SqlValue sql_val,
                                                          uint32_t i) const {
-  if (sql_val.type != SqlValue::kLong ||
-      sql_val.long_value > std::numeric_limits<uint32_t>::max() ||
-      sql_val.long_value < std::numeric_limits<uint32_t>::min()) {
-    // Because of the large amount of code needing for handling comparisions
-    // with doubles or out of range values, just defer to the full search.
-    return SingleSearchResult::kNeedsFullSearch;
-  }
-  return utils::SingleSearchNumeric(op, (*values_)[i],
-                                    static_cast<uint32_t>(sql_val.long_value));
+  return utils::SingleSearchNumeric(op, (*values_)[i], sql_val);
 }
 
 SearchValidationResult SetIdStorage::ChainImpl::ValidateSearchConstraints(
diff --git a/src/trace_processor/db/column/utils.h b/src/trace_processor/db/column/utils.h
index 14c6456..9533d8cd 100644
--- a/src/trace_processor/db/column/utils.h
+++ b/src/trace_processor/db/column/utils.h
@@ -19,7 +19,9 @@
 #include <algorithm>
 #include <cstdint>
 #include <functional>
+#include <limits>
 #include <optional>
+#include <type_traits>
 #include <vector>
 
 #include "perfetto/base/logging.h"
@@ -29,6 +31,36 @@
 #include "src/trace_processor/db/column/types.h"
 
 namespace perfetto::trace_processor::column::utils {
+namespace internal {
+
+template <typename T, typename Comparator>
+SingleSearchResult SingleSearchNumeric(T left, const SqlValue& right_v) {
+  if constexpr (std::is_same_v<T, double>) {
+    if (right_v.type != SqlValue::kDouble) {
+      // Because of the large amount of code needed to handle comparisons with
+      // integers, just defer to the full search.
+      return SingleSearchResult::kNeedsFullSearch;
+    }
+    return Comparator()(left, right_v.double_value)
+               ? SingleSearchResult::kMatch
+               : SingleSearchResult::kNoMatch;
+  } else if constexpr (std::is_integral_v<T>) {
+    if (right_v.type != SqlValue::kLong ||
+        right_v.long_value > std::numeric_limits<T>::max() ||
+        right_v.long_value < std::numeric_limits<T>::min()) {
+      // Because of the large amount of code needed to handle comparisons with
+      // doubles or out-of-range values, just defer to the full search.
+      return SingleSearchResult::kNeedsFullSearch;
+    }
+    return Comparator()(left, static_cast<T>(right_v.long_value))
+               ? SingleSearchResult::kMatch
+               : SingleSearchResult::kNoMatch;
+  } else {
+    static_assert(std::is_same_v<T, void>, "Illegal type");
+  }
+}
+
+}  // namespace internal
 
 template <typename Comparator, typename ValType, typename DataType>
 void LinearSearchWithComparator(ValType val,
@@ -77,27 +109,25 @@
 }
 
 template <typename T>
-SingleSearchResult SingleSearchNumeric(FilterOp op, T left, T right) {
+SingleSearchResult SingleSearchNumeric(FilterOp op,
+                                       T left,
+                                       const SqlValue& right_v) {
   switch (op) {
     case FilterOp::kEq:
-      return std::equal_to<T>()(left, right) ? SingleSearchResult::kMatch
-                                             : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::equal_to<T>>(left, right_v);
     case FilterOp::kNe:
-      return std::not_equal_to<T>()(left, right) ? SingleSearchResult::kMatch
-                                                 : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::not_equal_to<T>>(left,
+                                                                    right_v);
     case FilterOp::kGe:
-      return std::greater_equal<T>()(left, right)
-                 ? SingleSearchResult::kMatch
-                 : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::greater_equal<T>>(left,
+                                                                     right_v);
     case FilterOp::kGt:
-      return std::greater<T>()(left, right) ? SingleSearchResult::kMatch
-                                            : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::greater<T>>(left, right_v);
     case FilterOp::kLe:
-      return std::less_equal<T>()(left, right) ? SingleSearchResult::kMatch
-                                               : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::less_equal<T>>(left,
+                                                                  right_v);
     case FilterOp::kLt:
-      return std::less<T>()(left, right) ? SingleSearchResult::kMatch
-                                         : SingleSearchResult::kNoMatch;
+      return internal::SingleSearchNumeric<T, std::less<T>>(left, right_v);
     case FilterOp::kIsNotNull:
       return SingleSearchResult::kMatch;
     case FilterOp::kGlob:
diff --git a/src/trace_processor/importers/ftrace/ftrace_descriptors.cc b/src/trace_processor/importers/ftrace/ftrace_descriptors.cc
index 3b846f4..0955a25 100644
--- a/src/trace_processor/importers/ftrace/ftrace_descriptors.cc
+++ b/src/trace_processor/importers/ftrace/ftrace_descriptors.cc
@@ -24,7 +24,7 @@
 namespace trace_processor {
 namespace {
 
-std::array<FtraceMessageDescriptor, 498> descriptors{{
+std::array<FtraceMessageDescriptor, 502> descriptors{{
     {nullptr, 0, {}},
     {nullptr, 0, {}},
     {nullptr, 0, {}},
@@ -5501,6 +5501,52 @@
             {"prefree_seg", ProtoSchemaType::kUint32},
         },
     },
+    {
+        "fastrpc_dma_free",
+        3,
+        {
+            {},
+            {"cid", ProtoSchemaType::kInt32},
+            {"phys", ProtoSchemaType::kUint64},
+            {"size", ProtoSchemaType::kUint64},
+        },
+    },
+    {
+        "fastrpc_dma_alloc",
+        5,
+        {
+            {},
+            {"cid", ProtoSchemaType::kInt32},
+            {"phys", ProtoSchemaType::kUint64},
+            {"size", ProtoSchemaType::kUint64},
+            {"attr", ProtoSchemaType::kUint64},
+            {"mflags", ProtoSchemaType::kInt32},
+        },
+    },
+    {
+        "fastrpc_dma_unmap",
+        3,
+        {
+            {},
+            {"cid", ProtoSchemaType::kInt32},
+            {"phys", ProtoSchemaType::kUint64},
+            {"size", ProtoSchemaType::kUint64},
+        },
+    },
+    {
+        "fastrpc_dma_map",
+        7,
+        {
+            {},
+            {"cid", ProtoSchemaType::kInt32},
+            {"fd", ProtoSchemaType::kInt32},
+            {"phys", ProtoSchemaType::kUint64},
+            {"size", ProtoSchemaType::kUint64},
+            {"len", ProtoSchemaType::kUint64},
+            {"attr", ProtoSchemaType::kUint32},
+            {"mflags", ProtoSchemaType::kInt32},
+        },
+    },
 }};
 
 }  // namespace
diff --git a/src/trace_processor/importers/proto/winscope/protolog_messages_tracker.h b/src/trace_processor/importers/proto/winscope/protolog_messages_tracker.h
index 0f47c32..7b1eb9b 100644
--- a/src/trace_processor/importers/proto/winscope/protolog_messages_tracker.h
+++ b/src/trace_processor/importers/proto/winscope/protolog_messages_tracker.h
@@ -41,11 +41,11 @@
   };
 
   static ProtoLogMessagesTracker* GetOrCreate(TraceProcessorContext* context) {
-    if (!context->shell_transitions_tracker) {
-      context->shell_transitions_tracker.reset(new ProtoLogMessagesTracker());
+    if (!context->protolog_messages_tracker) {
+      context->protolog_messages_tracker.reset(new ProtoLogMessagesTracker());
     }
     return static_cast<ProtoLogMessagesTracker*>(
-        context->shell_transitions_tracker.get());
+        context->protolog_messages_tracker.get());
   }
 
   void TrackMessage(TrackedProtoLogMessage tracked_protolog_message);
diff --git a/src/trace_processor/perfetto_sql/stdlib/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
index 4c492d1..349e2f6 100644
--- a/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
@@ -36,6 +36,7 @@
     "stack_trace",
     "time",
     "v8",
+    "viz/summary",
     "wattson",
   ]
   generated_header = "stdlib.h"
diff --git a/src/trace_processor/perfetto_sql/stdlib/viz/summary/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/viz/summary/BUILD.gn
new file mode 100644
index 0000000..3f664bd
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/viz/summary/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../../../../gn/perfetto_sql.gni")
+
+perfetto_sql_source_set("summary") {
+  sources = [
+    "processes.sql",
+    "slices.sql",
+    "threads.sql",
+    "tracks.sql",
+  ]
+}
diff --git a/src/trace_processor/perfetto_sql/stdlib/viz/summary/processes.sql b/src/trace_processor/perfetto_sql/stdlib/viz/summary/processes.sql
new file mode 100644
index 0000000..18aa202
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/viz/summary/processes.sql
@@ -0,0 +1,93 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+INCLUDE PERFETTO MODULE viz.summary.slices;
+INCLUDE PERFETTO MODULE viz.summary.threads;
+
+CREATE PERFETTO TABLE _process_track_summary AS
+SELECT upid, SUM(cnt) AS slice_count
+FROM process_track
+JOIN _slice_track_summary USING (id)
+GROUP BY upid;
+
+CREATE PERFETTO TABLE _heap_profile_allocation_summary AS
+SELECT upid, COUNT() AS allocation_count
+FROM heap_profile_allocation
+GROUP BY upid;
+
+CREATE PERFETTO TABLE _heap_profile_graph_summary AS
+SELECT upid, COUNT() AS graph_object_count
+FROM heap_graph_object
+GROUP BY upid;
+
+CREATE PERFETTO TABLE _thread_process_grouped_summary AS
+SELECT
+  upid,
+  MAX(max_running_dur) AS max_running_dur,
+  SUM(sum_running_dur) AS sum_running_dur,
+  SUM(running_count) AS running_count,
+  SUM(slice_count) AS slice_count,
+  SUM(perf_sample_count) AS perf_sample_count
+FROM _thread_available_info_summary
+JOIN thread USING (utid)
+WHERE upid IS NOT NULL
+GROUP BY upid;
+
+CREATE PERFETTO TABLE _process_available_info_summary AS
+WITH r AS (
+  SELECT
+    upid,
+    t_summary.upid as summary_upid,
+    t_summary.max_running_dur AS max_running_dur,
+    t_summary.sum_running_dur,
+    t_summary.running_count,
+    t_summary.slice_count AS thread_slice_count,
+    t_summary.perf_sample_count AS perf_sample_count,
+    (
+      SELECT slice_count
+      FROM _process_track_summary
+      WHERE upid = p.upid
+    ) AS process_slice_count,
+    (
+      SELECT allocation_count
+      FROM _heap_profile_allocation_summary
+      WHERE upid = p.upid
+    ) AS allocation_count,
+    (
+      SELECT graph_object_count
+      FROM _heap_profile_graph_summary
+      WHERE upid = p.upid
+    ) AS graph_object_count
+  FROM process p
+  LEFT JOIN _thread_process_grouped_summary t_summary USING (upid)
+)
+SELECT
+  upid,
+  IFNULL(max_running_dur, 0) AS max_running_dur,
+  IFNULL(sum_running_dur, 0) AS sum_running_dur,
+  IFNULL(running_count, 0) AS running_count,
+  IFNULL(thread_slice_count, 0) AS thread_slice_count,
+  IFNULL(perf_sample_count, 0) AS perf_sample_count,
+  IFNULL(process_slice_count, 0) AS process_slice_count,
+  IFNULL(allocation_count, 0) AS allocation_count,
+  IFNULL(graph_object_count, 0) AS graph_object_count
+FROM r
+WHERE
+  NOT(
+    r.summary_upid IS NULL
+    AND process_slice_count IS NULL
+    AND allocation_count IS NULL
+    AND graph_object_count IS NULL
+  )
+  OR upid IN (SELECT upid FROM process_counter_track);
diff --git a/src/trace_processor/perfetto_sql/stdlib/viz/summary/slices.sql b/src/trace_processor/perfetto_sql/stdlib/viz/summary/slices.sql
new file mode 100644
index 0000000..c698859
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/viz/summary/slices.sql
@@ -0,0 +1,24 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+CREATE PERFETTO TABLE _slice_track_summary AS
+SELECT
+  track_id AS id,
+  COUNT() AS cnt,
+  MIN(dur) AS min_dur,
+  MAX(dur) AS max_dur,
+  MAX(depth) AS max_depth
+FROM slice
+GROUP BY track_id;
diff --git a/src/trace_processor/perfetto_sql/stdlib/viz/summary/threads.sql b/src/trace_processor/perfetto_sql/stdlib/viz/summary/threads.sql
new file mode 100644
index 0000000..ed74acf
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/viz/summary/threads.sql
@@ -0,0 +1,77 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+INCLUDE PERFETTO MODULE viz.summary.slices;
+
+CREATE PERFETTO TABLE _sched_summary AS
+SELECT
+  utid,
+  MAX(dur) AS max_running_dur,
+  SUM(dur) AS sum_running_dur,
+  COUNT() AS running_count
+FROM sched
+WHERE utid != 0 AND dur != -1
+GROUP BY utid;
+
+CREATE PERFETTO TABLE _thread_track_summary AS
+SELECT utid, SUM(cnt) AS slice_count
+FROM thread_track
+JOIN _slice_track_summary USING (id)
+GROUP BY utid;
+
+CREATE PERFETTO TABLE _perf_sample_summary AS
+SELECT utid, COUNT() AS perf_sample_cnt
+FROM perf_sample
+WHERE callsite_id IS NOT NULL
+GROUP BY utid;
+
+CREATE PERFETTO TABLE _thread_available_info_summary AS
+WITH raw AS (
+  SELECT
+    utid,
+    ss.max_running_dur,
+    ss.sum_running_dur,
+    ss.running_count,
+    (
+      SELECT slice_count
+      FROM _thread_track_summary
+      WHERE utid = t.utid
+    ) AS slice_count,
+    (
+      SELECT perf_sample_cnt
+      FROM _perf_sample_summary
+      WHERE utid = t.utid
+    ) AS perf_sample_count
+  FROM thread t
+  LEFT JOIN _sched_summary ss USING (utid)
+)
+SELECT
+  utid,
+  IFNULL(max_running_dur, 0) AS max_running_dur,
+  IFNULL(sum_running_dur, 0) AS sum_running_dur,
+  IFNULL(running_count, 0) AS running_count,
+  IFNULL(slice_count, 0) AS slice_count,
+  IFNULL(perf_sample_count, 0) AS perf_sample_count
+FROM raw r
+WHERE
+  NOT (
+    r.max_running_dur IS NULL
+    AND r.sum_running_dur IS NULL
+    AND r.running_count IS NULL
+    AND r.slice_count IS NULL
+    AND r.perf_sample_count IS NULL
+  )
+  OR utid IN (SELECT utid FROM cpu_profile_stack_sample)
+  OR utid IN (SELECT utid FROM thread_counter_track);
diff --git a/src/trace_processor/perfetto_sql/stdlib/viz/summary/tracks.sql b/src/trace_processor/perfetto_sql/stdlib/viz/summary/tracks.sql
new file mode 100644
index 0000000..7a1d5d0
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/viz/summary/tracks.sql
@@ -0,0 +1,36 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+INCLUDE PERFETTO MODULE viz.summary.slices;
+
+CREATE PERFETTO TABLE _process_track_summary_by_upid_and_name AS
+SELECT
+  upid,
+  name,
+  GROUP_CONCAT(id) AS track_ids,
+  COUNT() AS track_count
+FROM process_track
+JOIN _slice_track_summary USING (id)
+GROUP BY upid, name;
+
+CREATE PERFETTO TABLE _uid_track_track_summary_by_uid_and_name AS
+SELECT
+  uid,
+  name,
+  GROUP_CONCAT(id) AS track_ids,
+  COUNT() AS track_count
+FROM uid_track
+JOIN _slice_track_summary USING (id)
+GROUP BY uid, name;
diff --git a/src/trace_processor/types/trace_processor_context.h b/src/trace_processor/types/trace_processor_context.h
index af2837f..3136835 100644
--- a/src/trace_processor/types/trace_processor_context.h
+++ b/src/trace_processor/types/trace_processor_context.h
@@ -131,22 +131,23 @@
   // the GetOrCreate() method on their subclass type, e.g.
   // SyscallTracker::GetOrCreate(context)
   // clang-format off
-  std::unique_ptr<Destructible> android_probes_tracker;  // AndroidProbesTracker
-  std::unique_ptr<Destructible> binder_tracker;          // BinderTracker
-  std::unique_ptr<Destructible> heap_graph_tracker;      // HeapGraphTracker
-  std::unique_ptr<Destructible> syscall_tracker;         // SyscallTracker
-  std::unique_ptr<Destructible> system_info_tracker;     // SystemInfoTracker
-  std::unique_ptr<Destructible> v4l2_tracker;            // V4l2Tracker
-  std::unique_ptr<Destructible> virtio_video_tracker;    // VirtioVideoTracker
-  std::unique_ptr<Destructible> systrace_parser;         // SystraceParser
-  std::unique_ptr<Destructible> thread_state_tracker;    // ThreadStateTracker
-  std::unique_ptr<Destructible> i2c_tracker;             // I2CTracker
-  std::unique_ptr<Destructible> perf_data_tracker;       // PerfDataTracker
-  std::unique_ptr<Destructible> content_analyzer;        // ProtoContentAnalyzer
+  std::unique_ptr<Destructible> android_probes_tracker;    // AndroidProbesTracker
+  std::unique_ptr<Destructible> binder_tracker;            // BinderTracker
+  std::unique_ptr<Destructible> heap_graph_tracker;        // HeapGraphTracker
+  std::unique_ptr<Destructible> syscall_tracker;           // SyscallTracker
+  std::unique_ptr<Destructible> system_info_tracker;       // SystemInfoTracker
+  std::unique_ptr<Destructible> v4l2_tracker;              // V4l2Tracker
+  std::unique_ptr<Destructible> virtio_video_tracker;      // VirtioVideoTracker
+  std::unique_ptr<Destructible> systrace_parser;           // SystraceParser
+  std::unique_ptr<Destructible> thread_state_tracker;      // ThreadStateTracker
+  std::unique_ptr<Destructible> i2c_tracker;               // I2CTracker
+  std::unique_ptr<Destructible> perf_data_tracker;         // PerfDataTracker
+  std::unique_ptr<Destructible> content_analyzer;          // ProtoContentAnalyzer
   std::unique_ptr<Destructible> shell_transitions_tracker; // ShellTransitionsTracker
-  std::unique_ptr<Destructible> ftrace_sched_tracker;    // FtraceSchedEventTracker
-  std::unique_ptr<Destructible> v8_tracker;              // V8Tracker
-  std::unique_ptr<Destructible> jit_tracker;             // JitTracker
+  std::unique_ptr<Destructible> protolog_messages_tracker; // ProtoLogMessagesTracker
+  std::unique_ptr<Destructible> ftrace_sched_tracker;      // FtraceSchedEventTracker
+  std::unique_ptr<Destructible> v8_tracker;                // V8Tracker
+  std::unique_ptr<Destructible> jit_tracker;               // JitTracker
   // clang-format on
 
   // These fields are trace readers which will be called by |forwarding_parser|
diff --git a/src/trace_redaction/BUILD.gn b/src/trace_redaction/BUILD.gn
index be268d7..4f2fb5f 100644
--- a/src/trace_redaction/BUILD.gn
+++ b/src/trace_redaction/BUILD.gn
@@ -28,6 +28,8 @@
 
 source_set("trace_redaction") {
   sources = [
+    "collect_frame_cookies.cc",
+    "collect_frame_cookies.h",
     "collect_timeline_events.cc",
     "collect_timeline_events.h",
     "filter_ftrace_using_allowlist.cc",
@@ -42,6 +44,7 @@
     "filter_task_rename.h",
     "find_package_uid.cc",
     "find_package_uid.h",
+    "frame_cookie.h",
     "optimize_timeline.cc",
     "optimize_timeline.h",
     "populate_allow_lists.cc",
@@ -122,6 +125,7 @@
 perfetto_unittest_source_set("unittests") {
   testonly = true
   sources = [
+    "collect_frame_cookies_unittest.cc",
     "collect_timeline_events_unittest.cc",
     "filter_ftrace_using_allowlist_unittest.cc",
     "filter_packet_using_allowlist_unittest.cc",
@@ -147,6 +151,7 @@
     "../../protos/perfetto/trace:non_minimal_cpp",
     "../../protos/perfetto/trace:zero",
     "../../protos/perfetto/trace/android:cpp",
+    "../../protos/perfetto/trace/android:zero",
     "../../protos/perfetto/trace/ftrace:cpp",
     "../../protos/perfetto/trace/ftrace:zero",
     "../../protos/perfetto/trace/ps:cpp",
diff --git a/src/trace_redaction/collect_frame_cookies.cc b/src/trace_redaction/collect_frame_cookies.cc
new file mode 100644
index 0000000..13e5539
--- /dev/null
+++ b/src/trace_redaction/collect_frame_cookies.cc
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_redaction/collect_frame_cookies.h"
+
+#include "perfetto/base/status.h"
+#include "perfetto/protozero/field.h"
+#include "perfetto/protozero/proto_decoder.h"
+#include "src/trace_redaction/trace_redaction_framework.h"
+
+#include "protos/perfetto/trace/android/frame_timeline_event.pbzero.h"
+#include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto::trace_redaction {
+
+namespace {
+
+using FrameTimelineEvent = protos::pbzero::FrameTimelineEvent;
+
+struct Frame {
+  uint32_t id;
+  uint32_t pid;
+  uint32_t cookie;
+};
+
+constexpr Frame kActualDisplayFrameStart = {
+    FrameTimelineEvent::kActualDisplayFrameStartFieldNumber,
+    FrameTimelineEvent::ActualDisplayFrameStart::kPidFieldNumber,
+    FrameTimelineEvent::ActualDisplayFrameStart::kCookieFieldNumber,
+};
+
+constexpr Frame kExpectedDisplayFrameStart = {
+    FrameTimelineEvent::kExpectedDisplayFrameStartFieldNumber,
+    FrameTimelineEvent::ExpectedDisplayFrameStart::kPidFieldNumber,
+    FrameTimelineEvent::ExpectedDisplayFrameStart::kCookieFieldNumber,
+};
+
+constexpr Frame kActualSurfaceFrameStart = {
+    FrameTimelineEvent::kActualSurfaceFrameStartFieldNumber,
+    FrameTimelineEvent::ActualSurfaceFrameStart::kPidFieldNumber,
+    FrameTimelineEvent::ActualSurfaceFrameStart::kCookieFieldNumber,
+};
+
+constexpr Frame kExpectedSurfaceFrameStart = {
+    FrameTimelineEvent::kExpectedSurfaceFrameStartFieldNumber,
+    FrameTimelineEvent::ExpectedSurfaceFrameStart::kPidFieldNumber,
+    FrameTimelineEvent::ExpectedSurfaceFrameStart::kCookieFieldNumber,
+};
+
+// Do not use `pid` from `kFrameEnd`.
+constexpr Frame kFrameEnd = {
+    FrameTimelineEvent::kFrameEndFieldNumber,
+    0,
+    FrameTimelineEvent::FrameEnd::kCookieFieldNumber,
+};
+
+}  // namespace
+
+base::Status CollectFrameCookies::Begin(Context* context) const {
+  if (context->global_frame_cookies.empty()) {
+    return base::OkStatus();
+  }
+
+  return base::ErrStatus("FindFrameCookies: frame cookies already populated");
+}
+
+base::Status CollectFrameCookies::Collect(
+    const protos::pbzero::TracePacket::Decoder& packet,
+    Context* context) const {
+  // A frame cookie needs a time and pid for a timeline query. Ignore packets
+  // without a timestamp or without a frame timeline event.
+  if (!packet.has_timestamp() || !packet.has_frame_timeline_event()) {
+    return base::OkStatus();
+  }
+
+  auto timestamp = packet.timestamp();
+
+  // Only use the start frames. They are the only ones with a pid. End events
+  // use the cookies to reference the pid in a start event.
+  auto handlers = {
+      kActualDisplayFrameStart,
+      kActualSurfaceFrameStart,
+      kExpectedDisplayFrameStart,
+      kExpectedSurfaceFrameStart,
+  };
+
+  // Timeline Event Decoder.
+  protozero::ProtoDecoder decoder(packet.frame_timeline_event());
+
+  // If no handler matches, the cookie will not get added to the global
+  // cookies list.
+  for (const auto& handler : handlers) {
+    auto outer = decoder.FindField(handler.id);
+
+    if (!outer.valid()) {
+      continue;
+    }
+
+    protozero::ProtoDecoder inner(outer.as_bytes());
+
+    auto pid = inner.FindField(handler.pid);
+    auto cookie = inner.FindField(handler.cookie);
+
+    // The field matched a handler but is missing a valid pid or cookie. Drop
+    // the event by not adding it to the global_frame_cookies list.
+    if (!pid.valid() || !cookie.valid()) {
+      continue;
+    }
+
+    FrameCookie frame_cookie;
+    frame_cookie.pid = pid.as_int32();
+    frame_cookie.cookie = cookie.as_int64();
+    frame_cookie.ts = timestamp;
+
+    context->global_frame_cookies.push_back(frame_cookie);
+
+    break;
+  }
+
+  return base::OkStatus();
+}
+
+base::Status ReduceFrameCookies::Build(Context* context) const {
+  if (!context->package_uid.has_value()) {
+    return base::ErrStatus("ReduceFrameCookies: missing package uid.");
+  }
+
+  if (!context->timeline) {
+    return base::ErrStatus("ReduceFrameCookies: missing timeline.");
+  }
+
+  // Even though it is rare, it is possible for there to be no SurfaceFlinger
+  // frame cookies. Even though the main path handles this, we use this early
+  // exit to document this edge case.
+  if (context->global_frame_cookies.empty()) {
+    return base::OkStatus();
+  }
+
+  const auto* timeline = context->timeline.get();
+  auto uid = context->package_uid.value();
+
+  auto& package_frame_cookies = context->package_frame_cookies;
+
+  // Filter the global cookies down to cookies that belong to the target
+  // package (uid).
+  for (const auto& cookie : context->global_frame_cookies) {
+    auto cookie_slice = timeline->Search(cookie.ts, cookie.pid);
+
+    if (cookie_slice.uid == uid) {
+      package_frame_cookies.insert(cookie.cookie);
+    }
+  }
+
+  return base::OkStatus();
+}
+
+bool FilterFrameEvents::KeepField(const Context& context,
+                                  const protozero::Field& field) const {
+  // If this field is not a timeline event, then this primitive has no reason to
+  // reject this field.
+  //
+  // If it is a timeline event, the event's cookie must be in the package's
+  // cookies.
+  if (field.id() !=
+      protos::pbzero::TracePacket::kFrameTimelineEventFieldNumber) {
+    return true;
+  }
+
+  protozero::ProtoDecoder timeline_event_decoder(field.as_bytes());
+
+  auto handlers = {
+      kActualDisplayFrameStart,
+      kActualSurfaceFrameStart,
+      kExpectedDisplayFrameStart,
+      kExpectedSurfaceFrameStart,
+      kFrameEnd,
+  };
+
+  const auto& cookies = context.package_frame_cookies;
+
+  for (const auto& handler : handlers) {
+    auto event = timeline_event_decoder.FindField(handler.id);
+
+    if (!event.valid()) {
+      continue;
+    }
+
+    protozero::ProtoDecoder event_decoder(event.as_bytes());
+
+    auto cookie = event_decoder.FindField(handler.cookie);
+
+    if (cookie.valid() && cookies.count(cookie.as_int64())) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+}  // namespace perfetto::trace_redaction
diff --git a/src/trace_redaction/collect_frame_cookies.h b/src/trace_redaction/collect_frame_cookies.h
new file mode 100644
index 0000000..7463ff6
--- /dev/null
+++ b/src/trace_redaction/collect_frame_cookies.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_REDACTION_COLLECT_FRAME_COOKIES_H_
+#define SRC_TRACE_REDACTION_COLLECT_FRAME_COOKIES_H_
+
+#include "perfetto/protozero/field.h"
+#include "src/trace_redaction/scrub_trace_packet.h"
+#include "src/trace_redaction/trace_redaction_framework.h"
+
+#include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto::trace_redaction {
+
+// Populates Context::global_frame_cookies using FrameTimelineEvent messages.
+class CollectFrameCookies : public CollectPrimitive {
+ public:
+  base::Status Begin(Context* context) const override;
+
+  base::Status Collect(const protos::pbzero::TracePacket::Decoder& packet,
+                       Context* context) const override;
+
+ private:
+  void OnTimelineEvent(const protos::pbzero::TracePacket::Decoder& packet,
+                       protozero::ConstBytes bytes,
+                       Context* context) const;
+};
+
+// Moves cookies from Context::global_frame_cookies to
+// Context::package_frame_cookies using Context::timeline and
+// Context::package_uid.
+class ReduceFrameCookies : public BuildPrimitive {
+ public:
+  base::Status Build(Context* context) const override;
+};
+
+// Flags start-frame and end-frame events as keep/drop using
+// Context::package_frame_cookies.
+class FilterFrameEvents : public TracePacketFilter {
+ public:
+  bool KeepField(const Context& context,
+                 const protozero::Field& field) const override;
+};
+
+}  // namespace perfetto::trace_redaction
+
+#endif  // SRC_TRACE_REDACTION_COLLECT_FRAME_COOKIES_H_
diff --git a/src/trace_redaction/collect_frame_cookies_unittest.cc b/src/trace_redaction/collect_frame_cookies_unittest.cc
new file mode 100644
index 0000000..6e77231
--- /dev/null
+++ b/src/trace_redaction/collect_frame_cookies_unittest.cc
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_redaction/collect_frame_cookies.h"
+#include "src/base/test/status_matchers.h"
+#include "src/trace_redaction/collect_timeline_events.h"
+#include "test/gtest_and_gmock.h"
+
+#include "protos/perfetto/trace/android/frame_timeline_event.gen.h"
+#include "protos/perfetto/trace/android/frame_timeline_event.pbzero.h"
+#include "protos/perfetto/trace/trace_packet.gen.h"
+#include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto::trace_redaction {
+namespace {
+
+constexpr uint64_t kTimestampA = 0;
+constexpr uint64_t kTimestampB = 1000;
+constexpr uint64_t kTimestampC = 2000;
+constexpr uint64_t kTimestampD = 3000;
+constexpr uint64_t kTimestampE = 3000;
+
+constexpr int64_t kCookieA = 1234;
+
+// Start at 1, and not zero, because zero has special meaning (system uid).
+constexpr uint64_t kUidA = 1;
+
+constexpr int32_t kPidNone = 10;
+constexpr int32_t kPidA = 11;
+
+}  // namespace
+
+class FrameCookieFixture {
+ protected:
+  std::string CreateStartEvent(int32_t field_id,
+                               uint64_t ts,
+                               int32_t pid,
+                               int64_t cookie) const {
+    protos::gen::TracePacket packet;
+    packet.set_timestamp(ts);
+
+    switch (field_id) {
+      case protos::pbzero::FrameTimelineEvent::
+          kExpectedSurfaceFrameStartFieldNumber:
+        CreateExpectedSurfaceFrameStart(pid, cookie,
+                                        packet.mutable_frame_timeline_event());
+        break;
+
+      case protos::pbzero::FrameTimelineEvent::
+          kActualSurfaceFrameStartFieldNumber:
+        CreateActualSurfaceFrameStart(pid, cookie,
+                                      packet.mutable_frame_timeline_event());
+        break;
+
+      case protos::pbzero::FrameTimelineEvent::
+          kExpectedDisplayFrameStartFieldNumber:
+        CreateExpectedDisplayFrameStart(pid, cookie,
+                                        packet.mutable_frame_timeline_event());
+        break;
+
+      case protos::pbzero::FrameTimelineEvent::
+          kActualDisplayFrameStartFieldNumber:
+        CreateActualDisplayFrameStart(pid, cookie,
+                                      packet.mutable_frame_timeline_event());
+        break;
+
+      default:
+        PERFETTO_FATAL("Invalid field id");
+        break;
+    }
+
+    return packet.SerializeAsString();
+  }
+
+  std::string CreateFrameEnd(uint64_t ts, int64_t cookie) const {
+    protos::gen::TracePacket packet;
+    packet.set_timestamp(ts);
+
+    auto* start = packet.mutable_frame_timeline_event()->mutable_frame_end();
+    start->set_cookie(cookie);
+
+    return packet.SerializeAsString();
+  }
+
+  void CollectEvents(std::initializer_list<ProcessThreadTimeline::Event> events,
+                     Context* context) const {
+    CollectTimelineEvents collect;
+    ASSERT_OK(collect.Begin(context));
+
+    for (const auto& event : events) {
+      context->timeline->Append(event);
+    }
+
+    ASSERT_OK(collect.End(context));
+  }
+
+  void CollectCookies(std::initializer_list<std::string> packets,
+                      Context* context) const {
+    CollectFrameCookies collect;
+    ASSERT_OK(collect.Begin(context));
+
+    for (const auto& packet : packets) {
+      protos::pbzero::TracePacket::Decoder decoder(packet);
+      ASSERT_OK(collect.Collect(decoder, context));
+    }
+
+    ASSERT_OK(collect.End(context));
+  }
+
+ private:
+  void CreateExpectedSurfaceFrameStart(
+      int32_t pid,
+      int64_t cookie,
+      protos::gen::FrameTimelineEvent* event) const {
+    auto* start = event->mutable_expected_surface_frame_start();
+    start->set_cookie(cookie);
+    start->set_pid(pid);
+  }
+
+  void CreateActualSurfaceFrameStart(
+      int32_t pid,
+      int64_t cookie,
+      protos::gen::FrameTimelineEvent* event) const {
+    auto* start = event->mutable_actual_surface_frame_start();
+    start->set_cookie(cookie);
+    start->set_pid(pid);
+  }
+
+  void CreateExpectedDisplayFrameStart(
+      int32_t pid,
+      int64_t cookie,
+      protos::gen::FrameTimelineEvent* event) const {
+    auto* start = event->mutable_expected_display_frame_start();
+    start->set_cookie(cookie);
+    start->set_pid(pid);
+  }
+
+  void CreateActualDisplayFrameStart(
+      int32_t pid,
+      int64_t cookie,
+      protos::gen::FrameTimelineEvent* event) const {
+    auto* start = event->mutable_actual_display_frame_start();
+    start->set_cookie(cookie);
+    start->set_pid(pid);
+  }
+};
+
+class CollectFrameCookiesTest : public testing::Test,
+                                protected FrameCookieFixture,
+                                public testing::WithParamInterface<int32_t> {
+ protected:
+  Context context_;
+};
+
+TEST_P(CollectFrameCookiesTest, ExtractsExpectedSurfaceFrameStart) {
+  auto field_id = GetParam();
+
+  auto packet = CreateStartEvent(field_id, kTimestampA, kPidA, kCookieA);
+
+  CollectCookies({packet}, &context_);
+
+  ASSERT_EQ(context_.global_frame_cookies.size(), 1u);
+
+  auto& cookie = context_.global_frame_cookies.back();
+  ASSERT_EQ(cookie.cookie, kCookieA);
+  ASSERT_EQ(cookie.pid, kPidA);
+  ASSERT_EQ(cookie.ts, kTimestampA);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    EveryStartEventType,
+    CollectFrameCookiesTest,
+    testing::Values(
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::kActualSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedDisplayFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kActualDisplayFrameStartFieldNumber));
+
+// End events have no influence during the collect phase because they don't
+// have a direct connection to a process. They're only indirectly connected to
+// a pid through a start event that shares the same cookie value.
+TEST_F(CollectFrameCookiesTest, IgnoresFrameEnd) {
+  CollectCookies({CreateFrameEnd(kTimestampA, kPidA)}, &context_);
+
+  ASSERT_TRUE(context_.global_frame_cookies.empty());
+}
+
+class ReduceFrameCookiesTest : public testing::Test,
+                               protected FrameCookieFixture,
+                               public testing::WithParamInterface<int32_t> {
+ protected:
+  void SetUp() {
+    context_.package_uid = kUidA;
+
+    // Time A   +- Time B       +- Time C    +- Time D   +- Time E
+    //          |                            |
+    //          +------------ Pid A ---------+
+    //
+    // The pid will be active from time b to time d. Time A will be used for
+    // "before active". Time C will be used for "while active". Time E will be
+    // used for "after active".
+    CollectEvents(
+        {
+            ProcessThreadTimeline::Event::Open(kTimestampB, kPidA, kPidNone,
+                                               kUidA),
+            ProcessThreadTimeline::Event::Close(kTimestampD, kPidA),
+        },
+        &context_);
+  }
+
+  ReduceFrameCookies reduce_;
+  Context context_;
+};
+
+TEST_P(ReduceFrameCookiesTest, RejectBeforeActive) {
+  auto field_id = GetParam();
+
+  // kTimestampA is before pid starts.
+  auto packet = CreateStartEvent(field_id, kTimestampA, kPidA, kCookieA);
+
+  CollectCookies({packet}, &context_);
+
+  ASSERT_OK(reduce_.Build(&context_));
+  ASSERT_FALSE(context_.package_frame_cookies.count(kCookieA));
+}
+
+TEST_P(ReduceFrameCookiesTest, AcceptDuringActive) {
+  auto field_id = GetParam();
+
+  // kTimestampC is between pid starts and ends.
+  auto packet = CreateStartEvent(field_id, kTimestampC, kPidA, kCookieA);
+
+  CollectCookies({packet}, &context_);
+
+  ASSERT_OK(reduce_.Build(&context_));
+  ASSERT_TRUE(context_.package_frame_cookies.count(kCookieA));
+}
+
+TEST_P(ReduceFrameCookiesTest, RejectAfterActive) {
+  auto field_id = GetParam();
+
+  // kTimestampE is after pid ends.
+  auto packet = CreateStartEvent(field_id, kTimestampE, kPidA, kCookieA);
+
+  CollectCookies({packet}, &context_);
+
+  ASSERT_OK(reduce_.Build(&context_));
+  ASSERT_FALSE(context_.package_frame_cookies.count(kCookieA));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    EveryStartEventType,
+    ReduceFrameCookiesTest,
+    testing::Values(
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::kActualSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedDisplayFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kActualDisplayFrameStartFieldNumber));
+
+class FilterCookiesFieldsTest : public testing::Test,
+                                protected FrameCookieFixture,
+                                public testing::WithParamInterface<int32_t> {
+ protected:
+  protozero::Field ExtractTimelineEvent(const std::string& packet) const {
+    protozero::ProtoDecoder packet_decoder(packet);
+
+    // There must be one in order for the test to work, so we assume it's there.
+    return packet_decoder.FindField(
+        protos::pbzero::TracePacket::kFrameTimelineEventFieldNumber);
+  }
+
+  FilterFrameEvents filter_;
+  Context context_;
+};
+
+// If the event was within a valid pid's lifespan and was connected to the
+// package, it should be kept.
+TEST_P(FilterCookiesFieldsTest, IncludeIncludedStartCookies) {
+  context_.package_frame_cookies.insert(kCookieA);
+
+  auto field_id = GetParam();
+  auto packet = CreateStartEvent(field_id, kTimestampA, kPidA, kCookieA);
+  auto timeline_field = ExtractTimelineEvent(packet);
+
+  ASSERT_TRUE(filter_.KeepField(context_, timeline_field));
+}
+
+// If the event wasn't within a valid pid's lifespan and/or wasn't connected to
+// the package, it should be removed.
+TEST_P(FilterCookiesFieldsTest, ExcludeMissingStartCookies) {
+  auto field_id = GetParam();
+  auto packet = CreateStartEvent(field_id, kTimestampA, kPidA, kCookieA);
+  auto timeline_field = ExtractTimelineEvent(packet);
+
+  ASSERT_FALSE(filter_.KeepField(context_, timeline_field));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    EveryStartEventType,
+    FilterCookiesFieldsTest,
+    testing::Values(
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::kActualSurfaceFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kExpectedDisplayFrameStartFieldNumber,
+        protos::pbzero::FrameTimelineEvent::
+            kActualDisplayFrameStartFieldNumber));
+
+TEST_F(FilterCookiesFieldsTest, IncludeIncludedEndCookies) {
+  context_.package_frame_cookies.insert(kCookieA);
+
+  auto packet = CreateFrameEnd(kTimestampA, kCookieA);
+  auto timeline_field = ExtractTimelineEvent(packet);
+
+  ASSERT_TRUE(filter_.KeepField(context_, timeline_field));
+}
+
+TEST_F(FilterCookiesFieldsTest, ExcludeMissingEndCookies) {
+  auto packet = CreateFrameEnd(kTimestampA, kCookieA);
+  auto timeline_field = ExtractTimelineEvent(packet);
+
+  ASSERT_FALSE(filter_.KeepField(context_, timeline_field));
+}
+
+}  // namespace perfetto::trace_redaction
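The filter tests above exercise a single decision: a FrameTimelineEvent survives only if its cookie (read from a start sub-message or from the end event) appears in the reduced package_frame_cookies set. Below is a minimal sketch of that decision; DecodedFrameEvent and KeepFrameEvent are illustrative stand-ins, not the real FilterFrameEvents/pbzero API.

  #include <cstdint>
  #include <optional>
  #include <unordered_set>

  // Illustrative stand-in for a decoded FrameTimelineEvent: only the cookie
  // matters for the keep/drop decision (nullopt models an event with none).
  struct DecodedFrameEvent {
    std::optional<int64_t> cookie;
  };

  // An event is kept only if its cookie was connected to the target package
  // during the build phase.
  bool KeepFrameEvent(const std::unordered_set<int64_t>& package_frame_cookies,
                      const DecodedFrameEvent& event) {
    return event.cookie.has_value() &&
           package_frame_cookies.count(*event.cookie) != 0;
  }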
diff --git a/src/trace_redaction/frame_cookie.h b/src/trace_redaction/frame_cookie.h
new file mode 100644
index 0000000..bd96804
--- /dev/null
+++ b/src/trace_redaction/frame_cookie.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_REDACTION_FRAME_COOKIE_H_
+#define SRC_TRACE_REDACTION_FRAME_COOKIE_H_
+
+#include <cstdint>
+
+struct FrameCookie {
+  // The timestamp from the trace packet.
+  uint64_t ts;
+
+  // The pid will be found inside of the start event (there are four different
+  // start types). Depending on the start type, this is either the app's pid
+  // (main thread id) or surface flinger's pid:
+
+  // ExpectedSurfaceFrameStart: pid = app id
+  // ActualSurfaceFrameStart: pid = app id
+
+  // ExpectedDisplayFrameStart: pid = surface flinger
+  // ActualDisplayFrameStart: pid = surface flinger
+  int32_t pid;
+
+  // The cookie value will be found inside of the start event (there are four
+  // different start types). End events use the cookie to connect to the start
+  // event. Therefore end events don't need a pid.
+  int64_t cookie;
+};
+
+#endif  // SRC_TRACE_REDACTION_FRAME_COOKIE_H_
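For illustration, the collect phase reduces each start event to one of these records. A minimal sketch follows, using a DecodedStartEvent stand-in for the pid and cookie values the pbzero decoders expose; the real CollectFrameCookies may be structured differently.

  #include <cstdint>

  #include "src/trace_redaction/frame_cookie.h"

  // Illustrative stand-in for any of the four decoded start events.
  struct DecodedStartEvent {
    int32_t pid;
    int64_t cookie;
  };

  // The packet timestamp is carried along so the build phase can later check
  // it against the process timeline.
  FrameCookie MakeFrameCookie(uint64_t packet_ts,
                              const DecodedStartEvent& start) {
    FrameCookie frame_cookie;
    frame_cookie.ts = packet_ts;
    frame_cookie.pid = start.pid;
    frame_cookie.cookie = start.cookie;
    return frame_cookie;
  }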
diff --git a/src/trace_redaction/main.cc b/src/trace_redaction/main.cc
index 656fdef..2e433c0 100644
--- a/src/trace_redaction/main.cc
+++ b/src/trace_redaction/main.cc
@@ -16,6 +16,7 @@
 
 #include "perfetto/base/logging.h"
 #include "perfetto/base/status.h"
+#include "src/trace_redaction/collect_frame_cookies.h"
 #include "src/trace_redaction/collect_timeline_events.h"
 #include "src/trace_redaction/filter_ftrace_using_allowlist.h"
 #include "src/trace_redaction/filter_packet_using_allowlist.h"
@@ -49,15 +50,18 @@
   // Add all collectors.
   redactor.emplace_collect<FindPackageUid>();
   redactor.emplace_collect<CollectTimelineEvents>();
+  redactor.emplace_collect<CollectFrameCookies>();
 
   // Add all builders.
   redactor.emplace_build<PopulateAllowlists>();
   redactor.emplace_build<AllowSuspendResume>();
   redactor.emplace_build<OptimizeTimeline>();
+  redactor.emplace_build<ReduceFrameCookies>();
 
   // Add all transforms.
   auto* scrub_packet = redactor.emplace_transform<ScrubTracePacket>();
   scrub_packet->emplace_back<FilterPacketUsingAllowlist>();
+  scrub_packet->emplace_back<FilterFrameEvents>();
 
   auto* scrub_ftrace_events = redactor.emplace_transform<ScrubFtraceEvents>();
   scrub_ftrace_events->emplace_back<FilterFtraceUsingAllowlist>();
diff --git a/src/trace_redaction/scrub_trace_packet.cc b/src/trace_redaction/scrub_trace_packet.cc
index 77ff55c..fd29885 100644
--- a/src/trace_redaction/scrub_trace_packet.cc
+++ b/src/trace_redaction/scrub_trace_packet.cc
@@ -27,6 +27,10 @@
 
 TracePacketFilter::~TracePacketFilter() = default;
 
+base::Status TracePacketFilter::VerifyContext(const Context&) const {
+  return base::OkStatus();
+}
+
 base::Status ScrubTracePacket::Transform(const Context& context,
                                          std::string* packet) const {
   if (packet == nullptr || packet->empty()) {
diff --git a/src/trace_redaction/scrub_trace_packet.h b/src/trace_redaction/scrub_trace_packet.h
index 331cccf..22e506b 100644
--- a/src/trace_redaction/scrub_trace_packet.h
+++ b/src/trace_redaction/scrub_trace_packet.h
@@ -26,7 +26,7 @@
   virtual ~TracePacketFilter();
 
   // Checks if the context contains all necessary parameters.
-  virtual base::Status VerifyContext(const Context& context) const = 0;
+  virtual base::Status VerifyContext(const Context& context) const;
 
   // Checks if the field should be passed on to the new packet. Checks are a
   // logical AND, so all filters must return true.
diff --git a/src/trace_redaction/trace_redaction_framework.h b/src/trace_redaction/trace_redaction_framework.h
index f741f9b..cc135a9 100644
--- a/src/trace_redaction/trace_redaction_framework.h
+++ b/src/trace_redaction/trace_redaction_framework.h
@@ -21,9 +21,12 @@
 #include <memory>
 #include <optional>
 #include <string>
+#include <unordered_set>
+#include <vector>
 
 #include "perfetto/base/flat_set.h"
 #include "perfetto/base/status.h"
+#include "src/trace_redaction/frame_cookie.h"
 #include "src/trace_redaction/process_thread_timeline.h"
 
 #include "protos/perfetto/trace/trace_packet.pbzero.h"
@@ -186,6 +189,34 @@
   // After Sort(), Flatten() and Reduce() can be called (optional) to improve
   // the practical look-up times (compared to theoretical look-up times).
   std::unique_ptr<ProcessThreadTimeline> timeline;
+
+  // All frame events:
+  //
+  //  - ActualDisplayFrame
+  //  - ActualSurfaceFrame
+  //  - ExpectedDisplayFrame
+  //  - ExpectedSurfaceFrame
+  //
+  // Connect a time, a pid, and a cookie value. Cookies are unique within a
+  // trace, so if a cookie was connected to the target package, it can always be
+  // used.
+  //
+  // End events (i.e. FrameEnd) only have a time and cookie value. The cookie
+  // value connects it to its start time.
+  //
+  // In the collect phase, all start events are collected and converted to a
+  // simpler structure.
+  //
+  // In the build phase, the cookies are filtered to only include the ones that
+  // belong to the target package. This is done in the build phase, and not the
+  // collect phase, because the timeline is needed to determine if the cookie
+  // belongs to the target package.
+  std::vector<FrameCookie> global_frame_cookies;
+
+  // The collection of cookies that belong to the target package. Because cookie
+  // values are unique within the scope of the trace, pid and time are no longer
+  // needed and a set can be used for faster queries.
+  std::unordered_set<int64_t> package_frame_cookies;
 };
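The build-phase reduction described above amounts to a single pass over global_frame_cookies. Below is a minimal sketch; the caller-supplied predicate stands in for the ProcessThreadTimeline lookup, and the real ReduceFrameCookies may differ.

  #include <cstdint>
  #include <functional>

  // Keeps only the cookies whose start event fell inside the target package's
  // process lifespan; the predicate answers "was this pid connected to the
  // package uid at this timestamp?".
  void ReduceCookiesSketch(
      Context* context,
      const std::function<bool(uint64_t ts, int32_t pid)>& belongs_to_package) {
    for (const FrameCookie& frame_cookie : context->global_frame_cookies) {
      if (belongs_to_package(frame_cookie.ts, frame_cookie.pid)) {
        context->package_frame_cookies.insert(frame_cookie.cookie);
      }
    }
  }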
 
 // Extracts low-level data from the trace and writes it into the context. The
diff --git a/src/traced/probes/ftrace/event_info.cc b/src/traced/probes/ftrace/event_info.cc
index e11184b..09615fe 100644
--- a/src/traced/probes/ftrace/event_info.cc
+++ b/src/traced/probes/ftrace/event_info.cc
@@ -7597,16 +7597,22 @@
        "panel",
        {
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "type", 1, ProtoSchemaType::kUint32,
+            "pid", 1, ProtoSchemaType::kInt32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "pid", 2, ProtoSchemaType::kInt32,
+            "trace_name", 2, ProtoSchemaType::kString,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "name", 3, ProtoSchemaType::kString,
+            "trace_begin", 3, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "value", 4, ProtoSchemaType::kInt32,
+            "name", 4, ProtoSchemaType::kString,
+            TranslationStrategy::kInvalidTranslationStrategy},
+           {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
+            "type", 5, ProtoSchemaType::kUint32,
+            TranslationStrategy::kInvalidTranslationStrategy},
+           {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
+            "value", 6, ProtoSchemaType::kInt32,
             TranslationStrategy::kInvalidTranslationStrategy},
        },
        kUnsetFtraceId,
@@ -7616,55 +7622,55 @@
        "perf_trace_counters",
        {
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "prev_comm", 1, ProtoSchemaType::kString,
+            "old_pid", 1, ProtoSchemaType::kInt32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "prev_pid", 2, ProtoSchemaType::kInt32,
+            "new_pid", 2, ProtoSchemaType::kInt32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "cyc", 3, ProtoSchemaType::kUint32,
+            "cctr", 3, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "inst", 4, ProtoSchemaType::kUint32,
+            "ctr0", 4, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "stallbm", 5, ProtoSchemaType::kUint32,
+            "ctr1", 5, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "l3dm", 6, ProtoSchemaType::kUint32,
+            "ctr2", 6, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "old_pid", 7, ProtoSchemaType::kInt32,
+            "ctr3", 7, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "new_pid", 8, ProtoSchemaType::kInt32,
+            "lctr0", 8, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "cctr", 9, ProtoSchemaType::kUint32,
+            "lctr1", 9, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr0", 10, ProtoSchemaType::kUint32,
+            "ctr4", 10, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr1", 11, ProtoSchemaType::kUint32,
+            "ctr5", 11, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr2", 12, ProtoSchemaType::kUint32,
+            "prev_comm", 12, ProtoSchemaType::kString,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr3", 13, ProtoSchemaType::kUint32,
+            "prev_pid", 13, ProtoSchemaType::kInt32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "lctr0", 14, ProtoSchemaType::kUint32,
+            "cyc", 14, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "lctr1", 15, ProtoSchemaType::kUint32,
+            "inst", 15, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr4", 16, ProtoSchemaType::kUint32,
+            "stallbm", 16, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
            {kUnsetOffset, kUnsetSize, FtraceFieldType::kInvalidFtraceFieldType,
-            "ctr5", 17, ProtoSchemaType::kUint32,
+            "l3dm", 17, ProtoSchemaType::kUint32,
             TranslationStrategy::kInvalidTranslationStrategy},
        },
        kUnsetFtraceId,
diff --git a/ui/src/common/actions.ts b/ui/src/common/actions.ts
index 4910cbd..92cad97 100644
--- a/ui/src/common/actions.ts
+++ b/ui/src/common/actions.ts
@@ -1162,13 +1162,6 @@
       );
   },
 
-  setPivotTableArgumentNames(
-    state: StateDraft,
-    args: {argumentNames: string[]},
-  ) {
-    state.nonSerializableState.pivotTable.argumentNames = args.argumentNames;
-  },
-
   changePivotTablePivotOrder(
     state: StateDraft,
     args: {from: number; to: number; direction: DropDirection},
diff --git a/ui/src/common/empty_state.ts b/ui/src/common/empty_state.ts
index 5bf1f5a..f866914 100644
--- a/ui/src/common/empty_state.ts
+++ b/ui/src/common/empty_state.ts
@@ -83,7 +83,6 @@
       ],
       constrainToArea: true,
       queryRequested: false,
-      argumentNames: [],
     },
   };
 }
diff --git a/ui/src/common/state.ts b/ui/src/common/state.ts
index 3b27e16..d0af73b 100644
--- a/ui/src/common/state.ts
+++ b/ui/src/common/state.ts
@@ -424,9 +424,6 @@
   // Set to true by frontend to request controller to perform the query to
   // acquire the necessary data from the engine.
   queryRequested: boolean;
-
-  // Argument names in the current trace, used for autocompletion purposes.
-  argumentNames: string[];
 }
 
 export interface LoadedConfigNone {
diff --git a/ui/src/controller/pivot_table_controller.ts b/ui/src/controller/pivot_table_controller.ts
index b1eb386..663fdf4 100644
--- a/ui/src/controller/pivot_table_controller.ts
+++ b/ui/src/controller/pivot_table_controller.ts
@@ -31,7 +31,7 @@
 } from '../frontend/pivot_table_query_generator';
 import {Aggregation, PivotTree} from '../frontend/pivot_table_types';
 import {Engine} from '../trace_processor/engine';
-import {ColumnType, STR} from '../trace_processor/query_result';
+import {ColumnType} from '../trace_processor/query_result';
 
 import {Controller} from './controller';
 
@@ -189,7 +189,6 @@
   engine: Engine;
   lastQueryAreaId = '';
   lastQueryAreaTracks = new Set<string>();
-  requestedArgumentNames = false;
 
   constructor(args: {engine: Engine}) {
     super({});
@@ -272,31 +271,11 @@
     );
   }
 
-  async requestArgumentNames() {
-    this.requestedArgumentNames = true;
-    const result = await this.engine.query(`
-      select distinct flat_key from args
-    `);
-    const it = result.iter({flat_key: STR});
-
-    const argumentNames = [];
-    while (it.valid()) {
-      argumentNames.push(it.flat_key);
-      it.next();
-    }
-
-    globals.dispatch(Actions.setPivotTableArgumentNames({argumentNames}));
-  }
-
   run() {
     if (!PIVOT_TABLE_REDUX_FLAG.get()) {
       return;
     }
 
-    if (!this.requestedArgumentNames) {
-      this.requestArgumentNames();
-    }
-
     const pivotTableState = globals.state.nonSerializableState.pivotTable;
     const selection = getLegacySelection(globals.state);
 
diff --git a/ui/src/controller/trace_controller.ts b/ui/src/controller/trace_controller.ts
index 5194215..c1f0d6e 100644
--- a/ui/src/controller/trace_controller.ts
+++ b/ui/src/controller/trace_controller.ts
@@ -201,20 +201,14 @@
 // ensure it's only run once.
 async function defineMaxLayoutDepthSqlFunction(engine: Engine): Promise<void> {
   await engine.query(`
-    create or replace perfetto table __max_layout_depth_state as
-    select track_id, max(depth) as max_depth
-    from slice
-    group by track_id
-    order by track_id;
-
     create perfetto function __max_layout_depth(track_count INT, track_ids STRING)
     returns INT AS
     select iif(
       $track_count = 1,
       (
         select max_depth
-        from __max_layout_depth_state
-        where track_id = cast($track_ids AS int)
+        from _slice_track_summary
+        where id = cast($track_ids AS int)
       ),
       (
         select max(layout_depth)
@@ -517,6 +511,7 @@
 
     // Make sure the helper views are available before we start adding tracks.
     await this.initialiseHelperViews();
+    await this.includeSummaryTables();
 
     await defineMaxLayoutDepthSqlFunction(engine);
 
@@ -1097,6 +1092,22 @@
     }
   }
 
+  async includeSummaryTables() {
+    const engine = assertExists<Engine>(this.engine);
+
+    this.updateStatus('Creating slice summaries');
+    await engine.query(`include perfetto module viz.summary.slices;`);
+
+    this.updateStatus('Creating thread summaries');
+    await engine.query(`include perfetto module viz.summary.threads;`);
+
+    this.updateStatus('Creating process summaries');
+    await engine.query(`include perfetto module viz.summary.processes;`);
+
+    this.updateStatus('Creating track summaries');
+    await engine.query(`include perfetto module viz.summary.tracks;`);
+  }
+
   private updateStatus(msg: string): void {
     globals.dispatch(
       Actions.updateStatus({
diff --git a/ui/src/controller/track_decider.ts b/ui/src/controller/track_decider.ts
index d8f42fb..b6dd388 100644
--- a/ui/src/controller/track_decider.ts
+++ b/ui/src/controller/track_decider.ts
@@ -167,64 +167,35 @@
 
   async addGlobalAsyncTracks(engine: EngineProxy): Promise<void> {
     const rawGlobalAsyncTracks = await engine.query(`
-      with tracks_with_slices as materialized (
-        select distinct track_id
-        from slice
-      ),
-      global_tracks as (
-        select
-          track.parent_id as parent_id,
-          track.id as track_id,
-          track.name as name
-        from track
-        join tracks_with_slices on tracks_with_slices.track_id = track.id
-        where
-          track.type = "track"
-          or track.type = "gpu_track"
-          or track.type = "cpu_track"
-      ),
-      global_tracks_grouped as (
-        select
-          parent_id,
-          name,
-          group_concat(track_id) as trackIds,
-          count(track_id) as trackCount
-        from global_tracks track
-        group by parent_id, name
+      with global_tracks_grouped as (
+        select distinct t.parent_id, t.name
+        from track t
+        join _slice_track_summary using (id)
+        where t.type in ('track', 'gpu_track', 'cpu_track')
       )
       select
-        t.parent_id as parentId,
-        p.name as parentName,
         t.name as name,
-        t.trackIds as trackIds,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
+        t.parent_id as parentId,
+        p.name as parentName
       from global_tracks_grouped AS t
       left join track p on (t.parent_id = p.id)
-      order by p.name, t.name;
+      order by p.name, t.name
     `);
     const it = rawGlobalAsyncTracks.iter({
       name: STR_NULL,
-      parentName: STR_NULL,
       parentId: NUM_NULL,
-      maxDepth: NUM_NULL,
+      parentName: STR_NULL,
     });
 
     const parentIdToGroupId = new Map<number, string>();
-
     for (; it.valid(); it.next()) {
       const kind = ASYNC_SLICE_TRACK_KIND;
       const rawName = it.name === null ? undefined : it.name;
       const rawParentName = it.parentName === null ? undefined : it.parentName;
       const name = getTrackName({name: rawName, kind});
       const parentTrackId = it.parentId;
-      const maxDepth = it.maxDepth;
       let trackGroup = SCROLLING_TRACK_GROUP;
 
-      // If there are no slices in this track, skip it.
-      if (maxDepth === null) {
-        continue;
-      }
-
       if (parentTrackId !== null) {
         const groupId = parentIdToGroupId.get(parentTrackId);
         if (groupId === undefined) {
@@ -261,11 +232,11 @@
       // Only add a gpu freq track if we have
       // gpu freq data.
       const freqExistsResult = await engine.query(`
-      select *
-      from gpu_counter_track
-      where name = 'gpufreq' and gpu_id = ${gpu}
-      limit 1;
-    `);
+        select *
+        from gpu_counter_track
+        where name = 'gpufreq' and gpu_id = ${gpu}
+        limit 1;
+      `);
       if (freqExistsResult.numRows() > 0) {
         this.tracksToAdd.push({
           uri: `perfetto.Counter#gpu_freq${gpu}`,
@@ -638,14 +609,14 @@
 
   async addThreadStateTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with ts_distinct as materialized (select distinct utid from thread_state)
       select
         utid,
         upid,
         tid,
         thread.name as threadName
       from thread
-      where utid != 0 and utid in ts_distinct`);
+      join _sched_summary using (utid)
+    `);
 
     const it = result.iter({
       utid: NUM,
@@ -726,16 +697,16 @@
 
   async addThreadCounterTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-    select
-      thread_counter_track.name as trackName,
-      utid,
-      upid,
-      tid,
-      thread.name as threadName,
-      thread_counter_track.id as trackId
-    from thread_counter_track
-    join thread using(utid)
-    where thread_counter_track.name != 'thread_time'
+      select
+        thread_counter_track.name as trackName,
+        utid,
+        upid,
+        tid,
+        thread.name as threadName,
+        thread_counter_track.id as trackId
+      from thread_counter_track
+      join thread using(utid)
+      where thread_counter_track.name != 'thread_time'
   `);
 
     const it = result.iter({
@@ -776,27 +747,15 @@
 
   async addProcessAsyncSliceTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where
-            process_track.name is null or
-            process_track.name not like "% Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
+        upid,
+        t.name as trackName,
+        t.track_ids as trackIds,
+        process.name as processName,
+        process.pid as pid
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name is null or t.name not glob "* Timeline"
     `);
 
     const it = result.iter({
@@ -805,7 +764,6 @@
       trackIds: STR,
       processName: STR_NULL,
       pid: NUM_NULL,
-      maxDepth: NUM_NULL,
     });
     for (; it.valid(); it.next()) {
       const upid = it.upid;
@@ -813,14 +771,8 @@
       const rawTrackIds = it.trackIds;
       const processName = it.processName;
       const pid = it.pid;
-      const maxDepth = it.maxDepth;
 
-      if (maxDepth === null) {
-        // If there are no slices in this track, skip it.
-        continue;
-      }
-
-      const uuid = this.getUuid(0, upid);
+      const uuid = this.getUuid(null, upid);
       const name = getTrackName({
         name: trackName,
         upid,
@@ -840,37 +792,18 @@
 
   async addUserAsyncSliceTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with tracks_with_slices as materialized (
-        select distinct track_id
-        from slice
-      ),
-      global_tracks as (
-        select
-          uid_track.name,
-          uid_track.uid,
-          group_concat(uid_track.id) as trackIds,
-          count(uid_track.id) as trackCount
-        from uid_track
-        join tracks_with_slices
-        where tracks_with_slices.track_id == uid_track.id
-        group by uid_track.uid
-      )
       select
         t.name as name,
         t.uid as uid,
-        package_list.package_name as package_name,
-        t.trackIds as trackIds,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from global_tracks t
-      join package_list
-      where t.uid = package_list.uid
-      group by t.uid
-      `);
+        package_list.package_name as packageName
+      from _uid_track_track_summary_by_uid_and_name t
+      join package_list using (uid)
+    `);
 
     const it = result.iter({
       name: STR_NULL,
       uid: NUM_NULL,
-      package_name: STR_NULL,
+      packageName: STR_NULL,
     });
 
     // Map From [name] -> [uuid, key]
@@ -882,8 +815,7 @@
       }
       const rawName = it.name;
       const uid = it.uid === null ? undefined : it.uid;
-      const userName =
-        it.package_name === null ? `UID: ${uid}` : it.package_name;
+      const userName = it.packageName === null ? `UID: ${uid}` : it.packageName;
 
       const groupUuid = `uid-track-group${rawName}`;
       if (groupMap.get(rawName) === undefined) {
@@ -910,48 +842,29 @@
 
   async addActualFramesTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where process_track.name = "Actual Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
-  `);
+        upid,
+        t.name as trackName,
+        process.name as processName,
+        process.pid as pid
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name = "Actual Timeline"
+    `);
 
     const it = result.iter({
       upid: NUM,
       trackName: STR_NULL,
       processName: STR_NULL,
       pid: NUM_NULL,
-      maxDepth: NUM_NULL,
     });
     for (; it.valid(); it.next()) {
       const upid = it.upid;
       const trackName = it.trackName;
       const processName = it.processName;
       const pid = it.pid;
-      const maxDepth = it.maxDepth;
 
-      if (maxDepth === null) {
-        // If there are no slices in this track, skip it.
-        continue;
-      }
-
-      const uuid = this.getUuid(0, upid);
-
+      const uuid = this.getUuid(null, upid);
       const kind = ACTUAL_FRAMES_SLICE_TRACK_KIND;
       const name = getTrackName({
         name: trackName,
@@ -972,33 +885,21 @@
 
   async addExpectedFramesTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where process_track.name = "Expected Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
-  `);
+        upid,
+        t.name as trackName,
+        process.name as processName,
+        process.pid as pid
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name = "Expected Timeline"
+    `);
 
     const it = result.iter({
       upid: NUM,
       trackName: STR_NULL,
       processName: STR_NULL,
       pid: NUM_NULL,
-      maxDepth: NUM_NULL,
     });
 
     for (; it.valid(); it.next()) {
@@ -1006,15 +907,8 @@
       const trackName = it.trackName;
       const processName = it.processName;
       const pid = it.pid;
-      const maxDepth = it.maxDepth;
 
-      if (maxDepth === null) {
-        // If there are no slices in this track, skip it.
-        continue;
-      }
-
-      const uuid = this.getUuid(0, upid);
-
+      const uuid = this.getUuid(null, upid);
       const kind = EXPECTED_FRAMES_SLICE_TRACK_KIND;
       const name = getTrackName({
         name: trackName,
@@ -1035,7 +929,6 @@
 
   async addThreadSliceTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      with slice_track as materialized (select distinct track_id from slice)
       select
         thread_track.utid as utid,
         thread_track.id as trackId,
@@ -1046,8 +939,8 @@
         thread.name as threadName,
         thread.upid as upid
       from thread_track
+      join _slice_track_summary using (id)
       join thread using(utid)
-      join slice_track on thread_track.id = slice_track.track_id
   `);
 
     const it = result.iter({
@@ -1091,14 +984,14 @@
 
   async addProcessCounterTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-    select
-      process_counter_track.id as trackId,
-      process_counter_track.name as trackName,
-      upid,
-      process.pid,
-      process.name as processName
-    from process_counter_track
-    join process using(upid);
+      select
+        process_counter_track.id as trackId,
+        process_counter_track.name as trackName,
+        upid,
+        process.pid,
+        process.name as processName
+      from process_counter_track
+      join process using(upid);
   `);
     const it = result.iter({
       trackId: NUM,
@@ -1113,7 +1006,7 @@
       const trackId = it.trackId;
       const trackName = it.trackName;
       const processName = it.processName;
-      const uuid = this.getUuid(0, upid);
+      const uuid = this.getUuid(null, upid);
       const name = getTrackName({
         name: trackName,
         upid,
@@ -1135,13 +1028,13 @@
 
   async addProcessHeapProfileTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-    select distinct(upid) from heap_profile_allocation
-    union
-    select distinct(upid) from heap_graph_object
+      select upid
+      from _process_available_info_summary
+      where allocation_count > 0 or graph_object_count > 0
   `);
     for (const it = result.iter({upid: NUM}); it.valid(); it.next()) {
       const upid = it.upid;
-      const uuid = this.getUuid(0, upid);
+      const uuid = this.getUuid(null, upid);
       this.tracksToAdd.push({
         uri: `perfetto.HeapProfile#${upid}`,
         trackSortKey: PrimaryTrackSortKey.HEAP_PROFILE_TRACK,
@@ -1153,14 +1046,15 @@
 
   async addProcessPerfSamplesTracks(engine: EngineProxy): Promise<void> {
     const result = await engine.query(`
-      select distinct upid, pid
-      from perf_sample join thread using (utid) join process using (upid)
-      where callsite_id is not null
+      select upid, pid
+      from _process_available_info_summary
+      join process using (upid)
+      where perf_sample_count > 0
   `);
     for (const it = result.iter({upid: NUM, pid: NUM}); it.valid(); it.next()) {
       const upid = it.upid;
       const pid = it.pid;
-      const uuid = this.getUuid(0, upid);
+      const uuid = this.getUuid(null, upid);
       this.tracksToAdd.push({
         uri: `perfetto.PerfSamplesProfile#${upid}`,
         trackSortKey: PrimaryTrackSortKey.PERF_SAMPLES_PROFILE_TRACK,
@@ -1170,22 +1064,22 @@
     }
   }
 
-  getUuidUnchecked(utid: number, upid: number | null) {
+  getUuidUnchecked(utid: number | null, upid: number | null) {
     return upid === null
-      ? this.utidToUuid.get(utid)
+      ? this.utidToUuid.get(utid!)
       : this.upidToUuid.get(upid);
   }
 
-  getUuid(utid: number, upid: number | null) {
+  getUuid(utid: number | null, upid: number | null) {
     return assertExists(this.getUuidUnchecked(utid, upid));
   }
 
-  getOrCreateUuid(utid: number, upid: number | null) {
+  getOrCreateUuid(utid: number | null, upid: number | null) {
     let uuid = this.getUuidUnchecked(utid, upid);
     if (uuid === undefined) {
       uuid = uuidv4();
       if (upid === null) {
-        this.utidToUuid.set(utid, uuid);
+        this.utidToUuid.set(utid!, uuid);
       } else {
         this.upidToUuid.set(upid, uuid);
       }
@@ -1274,181 +1168,155 @@
     //  thread name
     //  utid
     const result = await engine.query(`
-    with candidateThreadsAndProcesses as materialized (
-      select upid, 0 as utid from process_track
-      union
-      select upid, 0 as utid from process_counter_track
-      union
-      select upid, utid from thread_counter_track join thread using(utid)
-      union
-      select upid, utid from thread_track join thread using(utid)
-      union
-      select upid, utid from (
-        select distinct utid from sched
-      ) join thread using(utid) group by utid
-      union
-      select upid, 0 as utid from (
-        select distinct utid from perf_sample where callsite_id is not null
-      ) join thread using (utid)
-      union
-      select upid, utid from (
-        select distinct utid from cpu_profile_stack_sample
-      ) join thread using(utid)
-      union
-      select upid as upid, 0 as utid from heap_profile_allocation
-      union
-      select upid as upid, 0 as utid from heap_graph_object
-    ),
-    schedSum as materialized (
-      select upid, sum(thread_total_dur) as total_dur
-      from (
-        select utid, sum(dur) as thread_total_dur
-        from sched where dur != -1 and utid != 0
-        group by utid
+      with processGroups as (
+        select
+          upid,
+          process.pid as pid,
+          process.name as processName,
+          sum_running_dur as sumRunningDur,
+          thread_slice_count + process_slice_count as sliceCount,
+          perf_sample_count as perfSampleCount,
+          allocation_count as heapProfileAllocationCount,
+          graph_object_count as heapGraphObjectCount,
+          (
+            select group_concat(string_value)
+            from args
+            where
+              process.arg_set_id is not null and
+              arg_set_id = process.arg_set_id and
+              flat_key = 'chrome.process_label'
+          ) chromeProcessLabels,
+          case process.name
+            when 'Browser' then 3
+            when 'Gpu' then 2
+            when 'Renderer' then 1
+            else 0
+          end as chromeProcessRank
+        from _process_available_info_summary
+        join process using(upid)
+      ),
+      threadGroups as (
+        select
+          utid,
+          tid,
+          thread.name as threadName,
+          sum_running_dur as sumRunningDur,
+          slice_count as sliceCount,
+          perf_sample_count as perfSampleCount
+        from _thread_available_info_summary
+        join thread using (utid)
+        where upid is null
       )
-      join thread using (utid)
-      group by upid
-    ),
-    sliceSum as materialized (
-      select
-        process.upid as upid,
-        sum(cnt) as sliceCount
-      from (select track_id, count(*) as cnt from slice group by track_id)
-        left join thread_track on track_id = thread_track.id
-        left join thread on thread_track.utid = thread.utid
-        left join process_track on track_id = process_track.id
-        join process on process.upid = thread.upid
-          or process_track.upid = process.upid
-      where process.upid is not null
-      group by process.upid
-    )
-    select
-      the_tracks.upid,
-      the_tracks.utid,
-      total_dur as hasSched,
-      hasHeapProfiles,
-      process.pid as pid,
-      thread.tid as tid,
-      process.name as processName,
-      thread.name as threadName,
-      package_list.debuggable as isDebuggable,
-      ifnull((
-        select group_concat(string_value)
-        from args
-        where
-          process.arg_set_id is not null and
-          arg_set_id = process.arg_set_id and
-          flat_key = 'chrome.process_label'
-      ), '') AS chromeProcessLabels,
-      (case process.name
-         when 'Browser' then 3
-         when 'Gpu' then 2
-         when 'Renderer' then 1
-         else 0
-      end) as chromeProcessRank
-    from candidateThreadsAndProcesses the_tracks
-    left join schedSum using(upid)
-    left join (
-      select
-        distinct(upid) as upid,
-        true as hasHeapProfiles
-      from heap_profile_allocation
-      union
-      select
-        distinct(upid) as upid,
-        true as hasHeapProfiles
-      from heap_graph_object
-    ) using (upid)
-    left join (
-      select
-        thread.upid as upid,
-        sum(cnt) as perfSampleCount
+      select *
       from (
-          select utid, count(*) as cnt
-          from perf_sample where callsite_id is not null
-          group by utid
-      ) join thread using (utid)
-      group by thread.upid
-    ) using (upid)
-    left join sliceSum using (upid)
-    left join thread using(utid)
-    left join process using(upid)
-    left join package_list using(uid)
-    order by
-      chromeProcessRank desc,
-      hasHeapProfiles desc,
-      perfSampleCount desc,
-      total_dur desc,
-      sliceCount desc,
-      processName asc nulls last,
-      the_tracks.upid asc nulls last,
-      threadName asc nulls last,
-      the_tracks.utid asc nulls last;
+        select
+          upid,
+          null as utid,
+          pid,
+          null as tid,
+          processName,
+          null as threadName,
+          sumRunningDur > 0 as hasSched,
+          heapProfileAllocationCount > 0
+            or heapGraphObjectCount > 0 as hasHeapInfo,
+          ifnull(chromeProcessLabels, '') as chromeProcessLabels
+        from processGroups
+        order by
+          chromeProcessRank desc,
+          heapProfileAllocationCount desc,
+          heapGraphObjectCount desc,
+          perfSampleCount desc,
+          sumRunningDur desc,
+          sliceCount desc,
+          processName asc,
+          upid asc
+      )
+      union all
+      select *
+      from (
+        select
+          null,
+          utid,
+          null as pid,
+          tid,
+          null as processName,
+          threadName,
+          sumRunningDur > 0 as hasSched,
+          0 as hasHeapInfo,
+          '' as chromeProcessLabels
+        from threadGroups
+        order by
+          perfSampleCount desc,
+          sumRunningDur desc,
+          sliceCount desc,
+          threadName asc,
+          utid asc
+      )
   `);
 
     const it = result.iter({
-      utid: NUM,
       upid: NUM_NULL,
-      tid: NUM_NULL,
+      utid: NUM_NULL,
       pid: NUM_NULL,
-      threadName: STR_NULL,
+      tid: NUM_NULL,
       processName: STR_NULL,
+      threadName: STR_NULL,
       hasSched: NUM_NULL,
-      hasHeapProfiles: NUM_NULL,
+      hasHeapInfo: NUM_NULL,
       chromeProcessLabels: STR,
     });
     for (; it.valid(); it.next()) {
       const utid = it.utid;
-      const tid = it.tid;
       const upid = it.upid;
       const pid = it.pid;
+      const tid = it.tid;
       const threadName = it.threadName;
       const processName = it.processName;
       // eslint-disable-next-line @typescript-eslint/strict-boolean-expressions
       const hasSched = !!it.hasSched;
       // eslint-disable-next-line @typescript-eslint/strict-boolean-expressions
-      const hasHeapProfiles = !!it.hasHeapProfiles;
+      const hasHeapInfo = !!it.hasHeapInfo;
 
-      // Group by upid if present else by utid.
-      let pUuid =
-        upid === null ? this.utidToUuid.get(utid) : this.upidToUuid.get(upid);
-      // These should only happen once for each track group.
-      if (pUuid === undefined) {
-        pUuid = this.getOrCreateUuid(utid, upid);
-        const summaryTrackKey = uuidv4();
-        const type = hasSched ? 'schedule' : 'summary';
-        const uri = `perfetto.ProcessScheduling#${upid}.${utid}.${type}`;
+      const summaryTrackKey = uuidv4();
+      const type = hasSched ? 'schedule' : 'summary';
+      const uri = `perfetto.ProcessScheduling#${upid}.${utid}.${type}`;
 
-        this.tracksToAdd.push({
-          uri,
-          key: summaryTrackKey,
-          trackSortKey: hasSched
-            ? PrimaryTrackSortKey.PROCESS_SCHEDULING_TRACK
-            : PrimaryTrackSortKey.PROCESS_SUMMARY_TRACK,
-          name: `${upid === null ? tid : pid} summary`,
-          labels: it.chromeProcessLabels.split(','),
-        });
-
-        const name = getTrackName({
-          utid,
-          processName,
-          pid,
-          threadName,
-          tid,
-          upid,
-        });
-        const addTrackGroup = Actions.addTrackGroup({
-          summaryTrackKey,
-          name,
-          id: pUuid,
-          // Perf profiling tracks remain collapsed, otherwise we would have too
-          // many expanded process tracks for some perf traces, leading to
-          // jankyness.
-          collapsed: !hasHeapProfiles,
-        });
-
-        this.addTrackGroupActions.push(addTrackGroup);
+      // If previous groupings (e.g. kernel threads) picked up their tracks,
+      // don't try to regroup them.
+      const pUuid =
+        upid === null ? this.utidToUuid.get(utid!) : this.upidToUuid.get(upid);
+      if (pUuid !== undefined) {
+        continue;
       }
+
+      this.tracksToAdd.push({
+        uri,
+        key: summaryTrackKey,
+        trackSortKey: hasSched
+          ? PrimaryTrackSortKey.PROCESS_SCHEDULING_TRACK
+          : PrimaryTrackSortKey.PROCESS_SUMMARY_TRACK,
+        name: `${upid === null ? tid : pid} summary`,
+        labels: it.chromeProcessLabels.split(','),
+      });
+
+      const name = getTrackName({
+        utid,
+        processName,
+        pid,
+        threadName,
+        tid,
+        upid,
+      });
+      const addTrackGroup = Actions.addTrackGroup({
+        summaryTrackKey,
+        name,
+        id: this.getOrCreateUuid(utid, upid),
+        // Perf profiling tracks remain collapsed, otherwise we would have too
+        // many expanded process tracks for some perf traces, leading to
+        // jankyness.
+        collapsed: !hasHeapInfo,
+      });
+      this.addTrackGroupActions.push(addTrackGroup);
     }
   }
 
diff --git a/ui/src/frontend/pivot_table_argument_popup.ts b/ui/src/frontend/pivot_table_argument_popup.ts
index faa1579..949200e 100644
--- a/ui/src/frontend/pivot_table_argument_popup.ts
+++ b/ui/src/frontend/pivot_table_argument_popup.ts
@@ -20,21 +20,6 @@
 
 interface ArgumentPopupArgs {
   onArgumentChange: (arg: string) => void;
-  knownArguments: string[];
-}
-
-function longestString(array: string[]): string {
-  if (array.length === 0) {
-    return '';
-  }
-
-  let answer = array[0];
-  for (let i = 1; i < array.length; i++) {
-    if (array[i].length > answer.length) {
-      answer = array[i];
-    }
-  }
-  return answer;
 }
 
 // Component rendering popup for entering an argument name to use as a pivot.
@@ -47,41 +32,6 @@
     raf.scheduleFullRedraw();
   }
 
-  renderMatches(attrs: ArgumentPopupArgs): m.Child[] {
-    const result: m.Child[] = [];
-
-    for (const option of attrs.knownArguments) {
-      // Would be great to have smarter fuzzy matching, but in the meantime
-      // simple substring check should work fine.
-      const index = option.indexOf(this.argument);
-
-      if (index === -1) {
-        continue;
-      }
-
-      if (result.length === 10) {
-        break;
-      }
-
-      result.push(
-        m(
-          'div',
-          {
-            onclick: () => {
-              this.setArgument(attrs, option);
-            },
-          },
-          option.substring(0, index),
-          // Highlight the matching part with bold font
-          m('strong', this.argument),
-          option.substring(index + this.argument.length),
-        ),
-      );
-    }
-
-    return result;
-  }
-
   view({attrs}: m.Vnode<ArgumentPopupArgs>): m.Child {
     return m(
       '.name-completion',
@@ -94,8 +44,6 @@
         },
         value: this.argument,
       }),
-      m('.arguments-popup-sizer', longestString(attrs.knownArguments)),
-      this.renderMatches(attrs),
     );
   }
 }
diff --git a/ui/src/frontend/tables/attribute_modal_holder.ts b/ui/src/frontend/tables/attribute_modal_holder.ts
index 562a922..d154ab1 100644
--- a/ui/src/frontend/tables/attribute_modal_holder.ts
+++ b/ui/src/frontend/tables/attribute_modal_holder.ts
@@ -15,7 +15,6 @@
 import m from 'mithril';
 
 import {showModal} from '../../widgets/modal';
-import {globals} from '../globals';
 import {ArgumentPopup} from '../pivot_table_argument_popup';
 
 export class AttributeModalHolder {
@@ -45,8 +44,6 @@
 
   private renderModalContents() {
     return m(ArgumentPopup, {
-      knownArguments:
-        globals.state.nonSerializableState.pivotTable.argumentNames,
       onArgumentChange: (arg) => {
         this.typedArgument = arg;
       },
diff --git a/ui/src/plugins/org.kernel.LinuxKernelDevices/index.ts b/ui/src/plugins/org.kernel.LinuxKernelDevices/index.ts
index d7a37d8..b9127f0 100644
--- a/ui/src/plugins/org.kernel.LinuxKernelDevices/index.ts
+++ b/ui/src/plugins/org.kernel.LinuxKernelDevices/index.ts
@@ -27,23 +27,11 @@
 class LinuxKernelDevices implements Plugin {
   async onTraceLoad(ctx: PluginContextTrace): Promise<void> {
     const result = await ctx.engine.query(`
-      with
-        slices_tracks as materialized (
-          select distinct track_id
-          from slice
-        ),
-        tracks as (
-          select
-            linux_device_track.id as track_id,
-            linux_device_track.name
-          from linux_device_track
-          join slices_tracks on
-          slices_tracks.track_id = linux_device_track.id
-        )
       select
-        t.name,
-        t.track_id as trackId
-      from tracks as t
+        t.id as trackId,
+        t.name
+      from linux_device_track t
+      join _slice_track_summary using (id)
       order by t.name;
     `);
 
diff --git a/ui/src/public/utils.ts b/ui/src/public/utils.ts
index 60c4487..f5d3d7a 100644
--- a/ui/src/public/utils.ts
+++ b/ui/src/public/utils.ts
@@ -22,7 +22,7 @@
 export function getTrackName(
   args: Partial<{
     name: string | null;
-    utid: number;
+    utid: number | null;
     processName: string | null;
     pid: number | null;
     threadName: string | null;
diff --git a/ui/src/tracks/async_slices/index.ts b/ui/src/tracks/async_slices/index.ts
index 2e8b3d5..18883d9 100644
--- a/ui/src/tracks/async_slices/index.ts
+++ b/ui/src/tracks/async_slices/index.ts
@@ -30,77 +30,41 @@
   async addGlobalAsyncTracks(ctx: PluginContextTrace): Promise<void> {
     const {engine} = ctx;
     const rawGlobalAsyncTracks = await engine.query(`
-      with tracks_with_slices as materialized (
-        select distinct track_id
-        from slice
-      ),
-      global_tracks as (
-        select
-          track.parent_id as parent_id,
-          track.id as track_id,
-          track.name as name
-        from track
-        join tracks_with_slices on tracks_with_slices.track_id = track.id
-        where
-          track.type = "track"
-          or track.type = "gpu_track"
-          or track.type = "cpu_track"
-      ),
-      global_tracks_grouped as (
+      with global_tracks_grouped as (
         select
           parent_id,
           name,
-          group_concat(track_id) as trackIds,
-          count(track_id) as trackCount
-        from global_tracks track
+          group_concat(id) as trackIds,
+          count() as trackCount
+        from track t
+        join _slice_track_summary using (id)
+        where t.type in ('track', 'gpu_track', 'cpu_track')
         group by parent_id, name
       )
       select
-        t.parent_id as parentId,
-        p.name as parentName,
         t.name as name,
+        t.parent_id as parentId,
         t.trackIds as trackIds,
         __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from global_tracks_grouped AS t
-      left join track p on (t.parent_id = p.id)
-      order by p.name, t.name;
+      from global_tracks_grouped t
     `);
     const it = rawGlobalAsyncTracks.iter({
       name: STR_NULL,
-      parentName: STR_NULL,
       parentId: NUM_NULL,
       trackIds: STR,
-      maxDepth: NUM_NULL,
+      maxDepth: NUM,
     });
 
-    // let scrollJankRendered = false;
-
     for (; it.valid(); it.next()) {
       const rawName = it.name === null ? undefined : it.name;
-      // const rawParentName = it.parentName === null ? undefined :
-      // it.parentName;
       const displayName = getTrackName({
         name: rawName,
         kind: ASYNC_SLICE_TRACK_KIND,
       });
       const rawTrackIds = it.trackIds;
       const trackIds = rawTrackIds.split(',').map((v) => Number(v));
-      // const parentTrackId = it.parentId;
       const maxDepth = it.maxDepth;
 
-      // If there are no slices in this track, skip it.
-      if (maxDepth === null) {
-        continue;
-      }
-
-      // if (ENABLE_SCROLL_JANK_PLUGIN_V2.get() && !scrollJankRendered &&
-      //     name.includes(INPUT_LATENCY_TRACK)) {
-      //   // This ensures that the scroll jank tracks render above the tracks
-      //   // for GestureScrollUpdate.
-      //   await this.addScrollJankTracks(this.engine);
-      //   scrollJankRendered = true;
-      // }
-
       ctx.registerTrack({
         uri: `perfetto.AsyncSlices#${rawName}.${it.parentId}`,
         displayName,
@@ -115,27 +79,16 @@
 
   async addProcessAsyncSliceTracks(ctx: PluginContextTrace): Promise<void> {
     const result = await ctx.engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where
-            process_track.name is null or
-            process_track.name not like "% Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
+        upid,
+        t.name as trackName,
+        t.track_ids as trackIds,
+        process.name as processName,
+        process.pid as pid,
+        __max_layout_depth(t.track_count, t.track_ids) as maxDepth
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name is null or t.name not glob "* Timeline"
     `);
 
     const it = result.iter({
@@ -144,7 +97,7 @@
       trackIds: STR,
       processName: STR_NULL,
       pid: NUM_NULL,
-      maxDepth: NUM_NULL,
+      maxDepth: NUM,
     });
     for (; it.valid(); it.next()) {
       const upid = it.upid;
@@ -155,11 +108,6 @@
       const pid = it.pid;
       const maxDepth = it.maxDepth;
 
-      if (maxDepth === null) {
-        // If there are no slices in this track, skip it.
-        continue;
-      }
-
       const kind = ASYNC_SLICE_TRACK_KIND;
       const displayName = getTrackName({
         name: trackName,
@@ -188,37 +136,20 @@
   async addUserAsyncSliceTracks(ctx: PluginContextTrace): Promise<void> {
     const {engine} = ctx;
     const result = await engine.query(`
-      with tracks_with_slices as materialized (
-        select distinct track_id
-        from slice
-      ),
-      global_tracks as (
-        select
-          uid_track.name,
-          uid_track.uid,
-          group_concat(uid_track.id) as trackIds,
-          count(uid_track.id) as trackCount
-        from uid_track
-        join tracks_with_slices
-        where tracks_with_slices.track_id == uid_track.id
-        group by uid_track.uid
-      )
       select
         t.name as name,
         t.uid as uid,
-        package_list.package_name as package_name,
-        t.trackIds as trackIds,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from global_tracks t
-      join package_list
-      where t.uid = package_list.uid
-      group by t.uid
-      `);
+        package_list.package_name as packageName,
+        t.track_ids as trackIds,
+        __max_layout_depth(t.track_count, t.track_ids) as maxDepth
+      from _uid_track_track_summary_by_uid_and_name t
+      join package_list using (uid)
+    `);
 
     const it = result.iter({
       name: STR_NULL,
       uid: NUM_NULL,
-      package_name: STR_NULL,
+      packageName: STR_NULL,
       trackIds: STR,
       maxDepth: NUM_NULL,
     });
@@ -226,7 +157,7 @@
     for (; it.valid(); it.next()) {
       const kind = ASYNC_SLICE_TRACK_KIND;
       const rawName = it.name === null ? undefined : it.name;
-      const userName = it.package_name === null ? undefined : it.package_name;
+      const userName = it.packageName === null ? undefined : it.packageName;
       const uid = it.uid === null ? undefined : it.uid;
       const rawTrackIds = it.trackIds;
       const trackIds = rawTrackIds.split(',').map((v) => Number(v));
diff --git a/ui/src/tracks/frames/index.ts b/ui/src/tracks/frames/index.ts
index 47e88ff..2cc877f 100644
--- a/ui/src/tracks/frames/index.ts
+++ b/ui/src/tracks/frames/index.ts
@@ -31,26 +31,17 @@
   async addExpectedFrames(ctx: PluginContextTrace): Promise<void> {
     const {engine} = ctx;
     const result = await engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where process_track.name = "Expected Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
-  `);
+        upid,
+        t.name as trackName,
+        t.track_ids as trackIds,
+        process.name as processName,
+        process.pid as pid,
+        __max_layout_depth(t.track_count, t.track_ids) as maxDepth
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name = "Expected Timeline"
+    `);
 
     const it = result.iter({
       upid: NUM,
@@ -58,7 +49,7 @@
       trackIds: STR,
       processName: STR_NULL,
       pid: NUM_NULL,
-      maxDepth: NUM_NULL,
+      maxDepth: NUM,
     });
 
     for (; it.valid(); it.next()) {
@@ -70,11 +61,6 @@
       const pid = it.pid;
       const maxDepth = it.maxDepth;
 
-      if (maxDepth === null) {
-        // If there are no slices in this track, skip it.
-        continue;
-      }
-
       const displayName = getTrackName({
         name: trackName,
         upid,
@@ -103,26 +89,17 @@
   async addActualFrames(ctx: PluginContextTrace): Promise<void> {
     const {engine} = ctx;
     const result = await engine.query(`
-      with process_async_tracks as materialized (
-        select
-          process_track.upid as upid,
-          process_track.name as trackName,
-          process.name as processName,
-          process.pid as pid,
-          group_concat(process_track.id) as trackIds,
-          count(1) as trackCount
-        from process_track
-        join process using(upid)
-        where process_track.name = "Actual Timeline"
-        group by
-          process_track.upid,
-          process_track.name
-      )
       select
-        t.*,
-        __max_layout_depth(t.trackCount, t.trackIds) as maxDepth
-      from process_async_tracks t;
-  `);
+        upid,
+        t.name as trackName,
+        t.track_ids as trackIds,
+        process.name as processName,
+        process.pid as pid,
+        __max_layout_depth(t.track_count, t.track_ids) as maxDepth
+      from _process_track_summary_by_upid_and_name t
+      join process using(upid)
+      where t.name = "Actual Timeline"
+    `);
 
     const it = result.iter({
       upid: NUM,
diff --git a/ui/src/tracks/process_summary/index.ts b/ui/src/tracks/process_summary/index.ts
index 47a93b0..3f53335 100644
--- a/ui/src/tracks/process_summary/index.ts
+++ b/ui/src/tracks/process_summary/index.ts
@@ -12,14 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import {v4 as uuidv4} from 'uuid';
-
 import {Plugin, PluginContextTrace, PluginDescriptor} from '../../public';
 import {
   LONG_NULL,
   NUM,
   NUM_NULL,
-  STR,
   STR_NULL,
 } from '../../trace_processor/query_result';
 import {assertExists} from '../../base/logging';
@@ -37,166 +34,61 @@
 
 // This plugin now manages both process "scheduling" and "summary" tracks.
 class ProcessSummaryPlugin implements Plugin {
-  private upidToUuid = new Map<number, string>();
-  private utidToUuid = new Map<number, string>();
-
   async onTraceLoad(ctx: PluginContextTrace): Promise<void> {
     await this.addProcessTrackGroups(ctx);
     await this.addKernelThreadSummary(ctx);
   }
 
   private async addProcessTrackGroups(ctx: PluginContextTrace): Promise<void> {
-    this.upidToUuid.clear();
-    this.utidToUuid.clear();
-
-    // We want to create groups of tracks in a specific order.
-    // The tracks should be grouped:
-    //    by upid
-    //    or (if upid is null) by utid
-    // the groups should be sorted by:
-    //  Chrome-based process rank based on process names (e.g. Browser)
-    //  has a heap profile or not
-    //  total cpu time *for the whole parent process*
-    //  process name
-    //  upid
-    //  thread name
-    //  utid
     const result = await ctx.engine.query(`
-    with candidateThreadsAndProcesses as materialized (
-      select upid, 0 as utid from process_track
-      union
-      select upid, 0 as utid from process_counter_track
-      union
-      select upid, utid from thread_counter_track join thread using(utid)
-      union
-      select upid, utid from thread_track join thread using(utid)
-      union
-      select upid, utid from (
-        select distinct utid from sched
-      ) join thread using(utid) group by utid
-      union
-      select upid, 0 as utid from (
-        select distinct utid from perf_sample where callsite_id is not null
-      ) join thread using (utid)
-      union
-      select upid, utid from (
-        select distinct utid from cpu_profile_stack_sample
-      ) join thread using(utid)
-      union
-      select upid as upid, 0 as utid from heap_profile_allocation
-      union
-      select upid as upid, 0 as utid from heap_graph_object
-    ),
-    schedSummary as materialized (
-      select
-        upid,
-        sum(thread_total_dur) as total_dur,
-        max(thread_max_dur) as total_max_dur,
-        sum(thread_event_count) as total_event_count
+      select *
       from (
         select
-          utid,
-          sum(dur) as thread_total_dur,
-          max(dur) as thread_max_dur,
-          count() as thread_event_count
-        from sched where dur != -1 and utid != 0
-        group by utid
+          _process_available_info_summary.upid,
+          null as utid,
+          pid,
+          null as tid,
+          process.name as processName,
+          null as threadName,
+          sum_running_dur > 0 as hasSched,
+          max_running_dur as maxRunningDur,
+          running_count as runningCount,
+          android_process_metadata.debuggable as isDebuggable
+        from _process_available_info_summary
+        join process using(upid)
+        left join android_process_metadata using(upid)
       )
-      join thread using (utid)
-      group by upid
-    ),
-    sliceSum as materialized (
-      select
-        process.upid as upid,
-        sum(cnt) as sliceCount
-      from (select track_id, count(*) as cnt from slice group by track_id)
-        left join thread_track on track_id = thread_track.id
-        left join thread on thread_track.utid = thread.utid
-        left join process_track on track_id = process_track.id
-        join process on process.upid = thread.upid
-          or process_track.upid = process.upid
-      where process.upid is not null
-      group by process.upid
-    )
-    select
-      the_tracks.upid,
-      the_tracks.utid,
-      total_dur as hasSched,
-      total_max_dur as schedMaxDur,
-      total_event_count as schedEventCount,
-      hasHeapProfiles,
-      process.pid as pid,
-      thread.tid as tid,
-      process.name as processName,
-      thread.name as threadName,
-      package_list.debuggable as isDebuggable,
-      ifnull((
-        select group_concat(string_value)
-        from args
-        where
-          process.arg_set_id is not null and
-          arg_set_id = process.arg_set_id and
-          flat_key = 'chrome.process_label'
-      ), '') AS chromeProcessLabels,
-      (case process.name
-         when 'Browser' then 3
-         when 'Gpu' then 2
-         when 'Renderer' then 1
-         else 0
-      end) as chromeProcessRank
-    from candidateThreadsAndProcesses the_tracks
-    left join schedSummary using(upid)
-    left join (
-      select
-        distinct(upid) as upid,
-        true as hasHeapProfiles
-      from heap_profile_allocation
-      union
-      select
-        distinct(upid) as upid,
-        true as hasHeapProfiles
-      from heap_graph_object
-    ) using (upid)
-    left join (
-      select
-        thread.upid as upid,
-        sum(cnt) as perfSampleCount
+      union all
+      select *
       from (
-          select utid, count(*) as cnt
-          from perf_sample where callsite_id is not null
-          group by utid
-      ) join thread using (utid)
-      group by thread.upid
-    ) using (upid)
-    left join sliceSum using (upid)
-    left join thread using(utid)
-    left join process using(upid)
-    left join package_list using(uid)
-    order by
-      chromeProcessRank desc,
-      hasHeapProfiles desc,
-      perfSampleCount desc,
-      total_dur desc,
-      sliceCount desc,
-      processName asc nulls last,
-      the_tracks.upid asc nulls last,
-      threadName asc nulls last,
-      the_tracks.utid asc nulls last;
+        select
+          null,
+          utid,
+          null as pid,
+          tid,
+          null as processName,
+          thread.name as threadName,
+          sum_running_dur > 0 as hasSched,
+          max_running_dur as maxRunningDur,
+          running_count as runningCount,
+          0 as isDebuggable
+        from _thread_available_info_summary
+        join thread using (utid)
+        where upid is null
+      )
   `);
 
     const it = result.iter({
-      utid: NUM,
       upid: NUM_NULL,
-      tid: NUM_NULL,
+      utid: NUM_NULL,
       pid: NUM_NULL,
-      threadName: STR_NULL,
+      tid: NUM_NULL,
       processName: STR_NULL,
+      threadName: STR_NULL,
       hasSched: NUM_NULL,
-      schedMaxDur: LONG_NULL,
-      schedEventCount: NUM_NULL,
-      hasHeapProfiles: NUM_NULL,
+      maxRunningDur: LONG_NULL,
+      runningCount: NUM_NULL,
       isDebuggable: NUM_NULL,
-      chromeProcessLabels: STR,
     });
     for (; it.valid(); it.next()) {
       const utid = it.utid;
@@ -204,59 +96,52 @@
       const upid = it.upid;
       const pid = it.pid;
       const hasSched = Boolean(it.hasSched);
-      const schedMaxDur = it.schedMaxDur;
-      const schedEventCount = it.schedEventCount;
+      const maxRunningDur = it.maxRunningDur;
+      const runningCount = it.runningCount;
       const isDebuggable = Boolean(it.isDebuggable);
 
-      // Group by upid if present else by utid.
-      let pUuid =
-        upid === null ? this.utidToUuid.get(utid) : this.upidToUuid.get(upid);
-      // These should only happen once for each track group.
-      if (pUuid === undefined) {
-        pUuid = this.getOrCreateUuid(utid, upid);
-        const pidForColor = pid ?? tid ?? upid ?? utid ?? 0;
-        const type = hasSched ? 'schedule' : 'summary';
-        const uri = `perfetto.ProcessScheduling#${upid}.${utid}.${type}`;
+      const pidForColor = pid ?? tid ?? upid ?? utid ?? 0;
+      const type = hasSched ? 'schedule' : 'summary';
+      const uri = `perfetto.ProcessScheduling#${upid}.${utid}.${type}`;
 
-        if (hasSched) {
-          const config: ProcessSchedulingTrackConfig = {
-            pidForColor,
-            upid,
-            utid,
-          };
+      if (hasSched) {
+        const config: ProcessSchedulingTrackConfig = {
+          pidForColor,
+          upid,
+          utid,
+        };
 
-          ctx.registerTrack({
-            uri,
-            displayName: `${upid === null ? tid : pid} schedule`,
-            kind: PROCESS_SCHEDULING_TRACK_KIND,
-            tags: {
-              isDebuggable,
-            },
-            trackFactory: () =>
-              new ProcessSchedulingTrack(
-                ctx.engine,
-                config,
-                assertExists(schedMaxDur),
-                assertExists(schedEventCount),
-              ),
-          });
-        } else {
-          const config: ProcessSummaryTrackConfig = {
-            pidForColor,
-            upid,
-            utid,
-          };
+        ctx.registerTrack({
+          uri,
+          displayName: `${upid === null ? tid : pid} schedule`,
+          kind: PROCESS_SCHEDULING_TRACK_KIND,
+          tags: {
+            isDebuggable,
+          },
+          trackFactory: () =>
+            new ProcessSchedulingTrack(
+              ctx.engine,
+              config,
+              assertExists(maxRunningDur),
+              assertExists(runningCount),
+            ),
+        });
+      } else {
+        const config: ProcessSummaryTrackConfig = {
+          pidForColor,
+          upid,
+          utid,
+        };
 
-          ctx.registerTrack({
-            uri,
-            displayName: `${upid === null ? tid : pid} summary`,
-            kind: PROCESS_SUMMARY_TRACK,
-            tags: {
-              isDebuggable,
-            },
-            trackFactory: () => new ProcessSummaryTrack(ctx.engine, config),
-          });
-        }
+        ctx.registerTrack({
+          uri,
+          displayName: `${upid === null ? tid : pid} summary`,
+          kind: PROCESS_SUMMARY_TRACK,
+          tags: {
+            isDebuggable,
+          },
+          trackFactory: () => new ProcessSummaryTrack(ctx.engine, config),
+        });
       }
     }
   }
@@ -313,25 +198,6 @@
       trackFactory: () => new ProcessSummaryTrack(ctx.engine, config),
     });
   }
-
-  private getOrCreateUuid(utid: number, upid: number | null) {
-    let uuid = this.getUuidUnchecked(utid, upid);
-    if (uuid === undefined) {
-      uuid = uuidv4();
-      if (upid === null) {
-        this.utidToUuid.set(utid, uuid);
-      } else {
-        this.upidToUuid.set(upid, uuid);
-      }
-    }
-    return uuid;
-  }
-
-  getUuidUnchecked(utid: number, upid: number | null) {
-    return upid === null
-      ? this.utidToUuid.get(utid)
-      : this.upidToUuid.get(upid);
-  }
 }
 
 export const plugin: PluginDescriptor = {
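
Similarly, addProcessTrackGroups now pulls its scheduling statistics from two further standard-library tables, _process_available_info_summary and _thread_available_info_summary, instead of the removed schedSummary CTE. Only the columns the plugin reads are visible in this diff (sum_running_dur, max_running_dur, running_count); the sketch below is a hypothetical reconstruction of just those columns, modelled on the old inline aggregation over sched, not the real module source:

-- Hypothetical sketch: per-thread running time over sched (dur != -1),
-- exposing the three columns the plugin consumes.
CREATE PERFETTO TABLE _thread_available_info_summary AS
SELECT
  utid,
  SUM(dur) AS sum_running_dur,
  MAX(dur) AS max_running_dur,
  COUNT() AS running_count
FROM sched
WHERE dur != -1
GROUP BY utid;

-- Hypothetical sketch: the same statistics rolled up per process.
CREATE PERFETTO TABLE _process_available_info_summary AS
SELECT
  thread.upid AS upid,
  SUM(t.sum_running_dur) AS sum_running_dur,
  MAX(t.max_running_dur) AS max_running_dur,
  SUM(t.running_count) AS running_count
FROM _thread_available_info_summary t
JOIN thread USING (utid)
WHERE thread.upid IS NOT NULL
GROUP BY thread.upid;
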
diff --git a/ui/src/tracks/process_summary/process_scheduling_track.ts b/ui/src/tracks/process_summary/process_scheduling_track.ts
index 07400ca..64fe72c 100644
--- a/ui/src/tracks/process_summary/process_scheduling_track.ts
+++ b/ui/src/tracks/process_summary/process_scheduling_track.ts
@@ -50,8 +50,8 @@
 
 export interface Config {
   pidForColor: number;
-  upid: null | number;
-  utid: number;
+  upid: number | null;
+  utid: number | null;
 }
 
 export class ProcessSchedulingTrack implements Track {
diff --git a/ui/src/tracks/process_summary/process_summary_track.ts b/ui/src/tracks/process_summary/process_summary_track.ts
index 6a8e687..f556728 100644
--- a/ui/src/tracks/process_summary/process_summary_track.ts
+++ b/ui/src/tracks/process_summary/process_summary_track.ts
@@ -37,7 +37,7 @@
 export interface Config {
   pidForColor: number;
   upid: number | null;
-  utid: number;
+  utid: number | null;
 }
 
 const MARGIN_TOP = 5;
diff --git a/ui/src/tracks/thread_state/index.ts b/ui/src/tracks/thread_state/index.ts
index b38eb1d..73e2faf 100644
--- a/ui/src/tracks/thread_state/index.ts
+++ b/ui/src/tracks/thread_state/index.ts
@@ -32,14 +32,14 @@
   async onTraceLoad(ctx: PluginContextTrace): Promise<void> {
     const {engine} = ctx;
     const result = await engine.query(`
-      with ts_distinct as materialized (select distinct utid from thread_state)
       select
         utid,
         upid,
         tid,
         thread.name as threadName
       from thread
-      where utid != 0 and utid in ts_distinct`);
+      join _sched_summary using (utid)
+    `);
 
     const it = result.iter({
       utid: NUM,