Merge "tp: fix handling of -1 slices in internal_layout" into main
diff --git a/Android.bp b/Android.bp
index e435f99..94b84ff 100644
--- a/Android.bp
+++ b/Android.bp
@@ -5323,6 +5323,7 @@
 genrule {
     name: "perfetto_protos_perfetto_metrics_webview_descriptor",
     srcs: [
+        ":libprotobuf-internal-descriptor-proto",
         "protos/perfetto/metrics/android/ad_services_metric.proto",
         "protos/perfetto/metrics/android/android_blocking_call.proto",
         "protos/perfetto/metrics/android/android_blocking_calls_cuj_metric.proto",
@@ -5388,7 +5389,7 @@
     tools: [
         "aprotoc",
     ],
-    cmd: "mkdir -p $(genDir)/external/perfetto/ && $(location aprotoc) --proto_path=external/perfetto --descriptor_set_out=$(out) $(in)",
+    cmd: "mkdir -p $(genDir)/external/perfetto/ && $(location aprotoc) --proto_path=external/perfetto --proto_path=external/protobuf/src --descriptor_set_out=$(out) $(in)",
     out: [
         "perfetto_protos_perfetto_metrics_webview_descriptor.bin",
     ],
@@ -11347,6 +11348,7 @@
     srcs: [
         "src/trace_processor/db/column/arrangement_overlay_unittest.cc",
         "src/trace_processor/db/column/dense_null_overlay_unittest.cc",
+        "src/trace_processor/db/column/fake_storage_unittest.cc",
         "src/trace_processor/db/column/id_storage_unittest.cc",
         "src/trace_processor/db/column/null_overlay_unittest.cc",
         "src/trace_processor/db/column/numeric_storage_unittest.cc",
diff --git a/CHANGELOG b/CHANGELOG
index b600600..99ff035 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -4,7 +4,18 @@
   Trace Processor:
     *
   UI:
-    *
+    * Add tracks to the list of searchable items.
+    * Use mipmaps to improve track query performance on large traces.
+    * Fix slow scrolling bug in ftrace explorer tab on low DPI machines.
+    * Overhaul track decider queries to improve trace load times.
+    * Add track
+    * Tidy up command names & remove some example ones.
+    * Remove arg auto-completion in pivot table.
+    * Show dominator tree views by default.
+    * Fix counter event selection off-by-one error.
+    * Add viewport control to the plugin API.
+    * Sticky track titles to improve track button accessibility in tall tracks.
+    * A handful of small bugfixes.
   SDK:
     * The TRACE_EVENT macro used to reject `const char *` event names: either
       `StaticString` or `DynamicString` needed to be specified. In the last year
diff --git a/gn/proto_library.gni b/gn/proto_library.gni
index f69d3ce..a7a77c6 100644
--- a/gn/proto_library.gni
+++ b/gn/proto_library.gni
@@ -371,7 +371,7 @@
 
         metadata = {
           proto_library_sources = invoker.sources
-          import_dirs = import_dirs_
+          proto_import_dirs = import_dirs_
           exports = get_path_info(public_deps_, "abspath")
         }
         forward_variables_from(invoker, vars_to_forward)
diff --git a/gn/standalone/proto_library.gni b/gn/standalone/proto_library.gni
index 07b8140..1a23a97 100644
--- a/gn/standalone/proto_library.gni
+++ b/gn/standalone/proto_library.gni
@@ -170,6 +170,10 @@
       ]
     }
 
+    metadata = {
+      proto_import_dirs = import_dirs
+    }
+
     if (generate_cc) {
       cc_generator_options_ = ""
       if (defined(invoker.cc_generator_options)) {
diff --git a/python/perfetto/prebuilts/perfetto_prebuilts.py b/python/perfetto/prebuilts/perfetto_prebuilts.py
index 087a337..61283cc 100644
--- a/python/perfetto/prebuilts/perfetto_prebuilts.py
+++ b/python/perfetto/prebuilts/perfetto_prebuilts.py
@@ -46,6 +46,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -62,28 +65,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/src/trace_processor/containers/implicit_segment_forest.h b/src/trace_processor/containers/implicit_segment_forest.h
index 5b58cb4..547abc6 100644
--- a/src/trace_processor/containers/implicit_segment_forest.h
+++ b/src/trace_processor/containers/implicit_segment_forest.h
@@ -98,7 +98,9 @@
     values_.emplace_back(std::move(v));
 
     size_t len = values_.size();
-    auto levels_to_index = static_cast<uint32_t>(__builtin_ctzl(~len)) - 1;
+    auto levels_to_index = static_cast<uint32_t>(__builtin_ctzl(
+                               static_cast<unsigned long>(~len))) -
+                           1;
 
     size_t cur = len - 1;
     for (uint32_t level = 0; level < levels_to_index; ++level) {
diff --git a/src/trace_processor/db/column/BUILD.gn b/src/trace_processor/db/column/BUILD.gn
index 74d1611..d53e7ae 100644
--- a/src/trace_processor/db/column/BUILD.gn
+++ b/src/trace_processor/db/column/BUILD.gn
@@ -75,6 +75,7 @@
   sources = [
     "arrangement_overlay_unittest.cc",
     "dense_null_overlay_unittest.cc",
+    "fake_storage_unittest.cc",
     "id_storage_unittest.cc",
     "null_overlay_unittest.cc",
     "numeric_storage_unittest.cc",
diff --git a/src/trace_processor/db/column/fake_storage.cc b/src/trace_processor/db/column/fake_storage.cc
index f587c77..babfb7c 100644
--- a/src/trace_processor/db/column/fake_storage.cc
+++ b/src/trace_processor/db/column/fake_storage.cc
@@ -41,6 +41,7 @@
 SingleSearchResult FakeStorageChain::SingleSearch(FilterOp,
                                                   SqlValue,
                                                   uint32_t i) const {
+  PERFETTO_CHECK(i < size_);
   switch (strategy_) {
     case kAll:
       return SingleSearchResult::kMatch;
@@ -115,37 +116,37 @@
     FilterOp,
     SqlValue,
     const OrderedIndices& indices) const {
-  if (strategy_ == kAll) {
-    return {0, indices.size};
-  }
-
-  if (strategy_ == kNone) {
-    return {};
-  }
-
-  if (strategy_ == kRange) {
-    // We are looking at intersection of |range_| and |indices_|.
-    const uint32_t* first_in_range = std::partition_point(
-        indices.data, indices.data + indices.size,
-        [this](uint32_t i) { return !range_.Contains(i); });
-    const uint32_t* first_outside_range =
-        std::partition_point(first_in_range, indices.data + indices.size,
-                             [this](uint32_t i) { return range_.Contains(i); });
-    return {static_cast<uint32_t>(std::distance(indices.data, first_in_range)),
-            static_cast<uint32_t>(
-                std::distance(indices.data, first_outside_range))};
-  }
-
-  PERFETTO_DCHECK(strategy_ == kBitVector);
-  // We are looking at intersection of |range_| and |bit_vector_|.
-  const uint32_t* first_set = std::partition_point(
-      indices.data, indices.data + indices.size,
-      [this](uint32_t i) { return !bit_vector_.IsSet(i); });
-  const uint32_t* first_non_set =
-      std::partition_point(first_set, indices.data + indices.size,
-                           [this](uint32_t i) { return bit_vector_.IsSet(i); });
-  return {static_cast<uint32_t>(std::distance(indices.data, first_set)),
+  switch (strategy_) {
+    case kAll:
+      return {0, indices.size};
+    case kNone:
+      return {};
+    case kRange: {
+      // We are looking at intersection of |range_| and |indices_|.
+      const uint32_t* first_in_range = std::partition_point(
+          indices.data, indices.data + indices.size,
+          [this](uint32_t i) { return !range_.Contains(i); });
+      const uint32_t* first_outside_range = std::partition_point(
+          first_in_range, indices.data + indices.size,
+          [this](uint32_t i) { return range_.Contains(i); });
+      return {
+          static_cast<uint32_t>(std::distance(indices.data, first_in_range)),
+          static_cast<uint32_t>(
+              std::distance(indices.data, first_outside_range))};
+    }
+    case kBitVector:
+      // We are looking at intersection of |indices| and |bit_vector_|.
+      const uint32_t* first_set = std::partition_point(
+          indices.data, indices.data + indices.size,
+          [this](uint32_t i) { return !bit_vector_.IsSet(i); });
+      const uint32_t* first_non_set = std::partition_point(
+          first_set, indices.data + indices.size,
+          [this](uint32_t i) { return bit_vector_.IsSet(i); });
+      return {
+          static_cast<uint32_t>(std::distance(indices.data, first_set)),
           static_cast<uint32_t>(std::distance(indices.data, first_non_set))};
+  }
+  PERFETTO_FATAL("For GCC");
 }
 
 void FakeStorageChain::StableSort(SortToken*, SortToken*, SortDirection) const {
diff --git a/src/trace_processor/db/column/fake_storage_unittest.cc b/src/trace_processor/db/column/fake_storage_unittest.cc
new file mode 100644
index 0000000..0bc0211
--- /dev/null
+++ b/src/trace_processor/db/column/fake_storage_unittest.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "src/trace_processor/db/column/fake_storage.h"
+
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "perfetto/trace_processor/basic_types.h"
+#include "src/trace_processor/containers/bit_vector.h"
+#include "src/trace_processor/db/column/data_layer.h"
+#include "src/trace_processor/db/column/types.h"
+#include "src/trace_processor/db/column/utils.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto::trace_processor {
+
+inline bool operator==(const Range& a, const Range& b) {
+  return std::tie(a.start, a.end) == std::tie(b.start, b.end);
+}
+
+inline bool operator==(const BitVector& a, const BitVector& b) {
+  return a.size() == b.size() && a.CountSetBits() == b.CountSetBits();
+}
+
+namespace column {
+namespace {
+
+using testing::ElementsAre;
+using testing::IsEmpty;
+
+using Indices = DataLayerChain::Indices;
+using OrderedIndices = DataLayerChain::OrderedIndices;
+
+TEST(FakeStorage, ValidateSearchConstraints) {
+  {
+    // All passes
+    auto fake = FakeStorageChain::SearchAll(10);
+    EXPECT_EQ(fake->ValidateSearchConstraints(FilterOp::kEq, SqlValue()),
+              SearchValidationResult::kOk);
+  }
+  {
+    // None passes
+    auto fake = FakeStorageChain::SearchNone(10);
+    EXPECT_EQ(fake->ValidateSearchConstraints(FilterOp::kEq, SqlValue()),
+              SearchValidationResult::kOk);
+  }
+  {
+    // Index vector
+    auto fake =
+        FakeStorageChain::SearchSubset(5, std::vector<uint32_t>{1, 2, 3, 4, 5});
+    EXPECT_EQ(fake->ValidateSearchConstraints(FilterOp::kEq, SqlValue()),
+              SearchValidationResult::kOk);
+  }
+  {
+    // BitVector
+    auto fake = FakeStorageChain::SearchSubset(5, BitVector{0, 1, 0, 1, 0});
+    EXPECT_EQ(fake->ValidateSearchConstraints(FilterOp::kEq, SqlValue()),
+              SearchValidationResult::kOk);
+  }
+  {
+    // Range
+    auto fake = FakeStorageChain::SearchSubset(5, Range(1, 4));
+    EXPECT_EQ(fake->ValidateSearchConstraints(FilterOp::kEq, SqlValue()),
+              SearchValidationResult::kOk);
+  }
+}
+
+TEST(FakeStorage, SingleSearch) {
+  {
+    // All passes
+    auto fake = FakeStorageChain::SearchAll(10);
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 5u),
+              SingleSearchResult::kMatch);
+  }
+  {
+    // None passes
+    auto fake = FakeStorageChain::SearchNone(10);
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 5u),
+              SingleSearchResult::kNoMatch);
+  }
+  {
+    // Index vector
+    auto fake =
+        FakeStorageChain::SearchSubset(5, std::vector<uint32_t>{1, 2, 3, 4, 5});
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 0u),
+              SingleSearchResult::kNoMatch);
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 1u),
+              SingleSearchResult::kMatch);
+  }
+  {
+    // BitVector
+    auto fake = FakeStorageChain::SearchSubset(5, BitVector{0, 1, 0, 1, 0});
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 0),
+              SingleSearchResult::kNoMatch);
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 1u),
+              SingleSearchResult::kMatch);
+  }
+  {
+    // Range
+    auto fake = FakeStorageChain::SearchSubset(5, Range(1, 4));
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 0),
+              SingleSearchResult::kNoMatch);
+    EXPECT_EQ(fake->SingleSearch(FilterOp::kEq, SqlValue(), 1u),
+              SingleSearchResult::kMatch);
+  }
+}
+
+TEST(FakeStorage, IndexSearchValidated) {
+  {
+    // All passes
+    Indices indices = Indices::CreateWithIndexPayloadForTesting(
+        {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+    auto fake = FakeStorageChain::SearchAll(5);
+    fake->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(0, 1, 2));
+  }
+  {
+    // None passes
+    Indices indices = Indices::CreateWithIndexPayloadForTesting(
+        {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+    auto fake = FakeStorageChain::SearchNone(5);
+    fake->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_TRUE(utils::ExtractPayloadForTesting(indices).empty());
+  }
+  {
+    // BitVector
+    Indices indices = Indices::CreateWithIndexPayloadForTesting(
+        {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+    auto fake = FakeStorageChain::SearchSubset(5, BitVector{0, 1, 0, 1, 0});
+    fake->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(0, 2));
+  }
+  {
+    // Index vector
+    Indices indices = Indices::CreateWithIndexPayloadForTesting(
+        {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+    auto fake =
+        FakeStorageChain::SearchSubset(5, std::vector<uint32_t>{1, 2, 3});
+    fake->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(0, 2));
+  }
+  {
+    // Range
+    Indices indices = Indices::CreateWithIndexPayloadForTesting(
+        {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+    auto fake = FakeStorageChain::SearchSubset(5, Range(1, 4));
+    fake->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(0, 2));
+  }
+}
+
+TEST(FakeStorage, OrderedIndexSearchValidated) {
+  std::vector<uint32_t> table_idx{4, 3, 2, 1};
+  OrderedIndices indices{table_idx.data(), uint32_t(table_idx.size()),
+                         Indices::State::kNonmonotonic};
+  {
+    // All passes
+    auto fake = FakeStorageChain::SearchAll(5);
+    Range ret =
+        fake->OrderedIndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_EQ(ret, Range(0, 4));
+  }
+  {
+    // None passes
+    auto fake = FakeStorageChain::SearchNone(5);
+    Range ret =
+        fake->OrderedIndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_EQ(ret, Range(0, 0));
+  }
+  {
+    // BitVector
+    auto fake = FakeStorageChain::SearchSubset(5, BitVector{0, 0, 1, 1, 1});
+    Range ret =
+        fake->OrderedIndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_EQ(ret, Range(0, 3));
+  }
+  {
+    // Index vector
+    auto fake =
+        FakeStorageChain::SearchSubset(5, std::vector<uint32_t>{1, 2, 3});
+    Range ret =
+        fake->OrderedIndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_EQ(ret, Range(1, 4));
+  }
+  {
+    // Range
+    auto fake = FakeStorageChain::SearchSubset(5, Range(1, 4));
+    Range ret =
+        fake->OrderedIndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
+    EXPECT_EQ(ret, Range(1, 4));
+  }
+}
+
+}  // namespace
+}  // namespace column
+}  // namespace perfetto::trace_processor
diff --git a/src/trace_processor/db/column/range_overlay_unittest.cc b/src/trace_processor/db/column/range_overlay_unittest.cc
index a965be3..240e998 100644
--- a/src/trace_processor/db/column/range_overlay_unittest.cc
+++ b/src/trace_processor/db/column/range_overlay_unittest.cc
@@ -95,14 +95,17 @@
 TEST(RangeOverlay, IndexSearch) {
   auto fake =
       FakeStorageChain::SearchSubset(8, BitVector({0, 1, 0, 1, 0, 1, 0, 0}));
+
+  // {true, false}
   Range range(3, 5);
   RangeOverlay storage(&range);
   auto chain = storage.MakeChain(std::move(fake));
 
+  // {true, false, true}
   Indices indices = Indices::CreateWithIndexPayloadForTesting(
-      {1u, 0u, 3u}, Indices::State::kNonmonotonic);
+      {0, 1, 0}, Indices::State::kNonmonotonic);
   chain->IndexSearch(FilterOp::kGe, SqlValue::Long(0u), indices);
-  ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(1u));
+  ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(0, 2));
 }
 
 TEST(RangeOverlay, StableSort) {
diff --git a/src/trace_processor/importers/proto/metadata_minimal_module.cc b/src/trace_processor/importers/proto/metadata_minimal_module.cc
index df4dd3f..6911275 100644
--- a/src/trace_processor/importers/proto/metadata_minimal_module.cc
+++ b/src/trace_processor/importers/proto/metadata_minimal_module.cc
@@ -189,11 +189,6 @@
       return;
     protos::pbzero::BackgroundTracingMetadata::TriggerRule::Decoder
         triggered_rule_decoder(triggered_rule.data, triggered_rule.size);
-    if (!triggered_rule_decoder.has_name_hash())
-      return;
-    metadata->SetDynamicMetadata(
-        storage->InternString("cr-triggered_rule_name_hash"),
-        Variadic::Integer(triggered_rule_decoder.name_hash()));
   }
 }
 
diff --git a/src/trace_processor/importers/proto/metadata_module.cc b/src/trace_processor/importers/proto/metadata_module.cc
index 9011737..86cc72b 100644
--- a/src/trace_processor/importers/proto/metadata_module.cc
+++ b/src/trace_processor/importers/proto/metadata_module.cc
@@ -135,6 +135,11 @@
   }
   context_->slice_tracker->Scoped(ts, track_id, cat_id, name_id,
                                   /* duration = */ 0);
+
+  MetadataTracker* metadata = context_->metadata_tracker.get();
+  metadata->SetDynamicMetadata(
+      context_->storage->InternString("cr-triggered_rule_name_hash"),
+      Variadic::Integer(trigger.trigger_name_hash()));
 }
 
 void MetadataModule::ParseTraceUuid(ConstBytes blob) {
diff --git a/src/trace_processor/perfetto_sql/intrinsics/functions/structural_tree_partition.cc b/src/trace_processor/perfetto_sql/intrinsics/functions/structural_tree_partition.cc
index c3d2ac1..2d68548 100644
--- a/src/trace_processor/perfetto_sql/intrinsics/functions/structural_tree_partition.cc
+++ b/src/trace_processor/perfetto_sql/intrinsics/functions/structural_tree_partition.cc
@@ -178,6 +178,7 @@
       // Keep track of the fact this node was processed and update the ancestor
       // id for all children.
       ss.first_pass_done = true;
+      ss.prev_ancestor_id_for_group = ancestor_id_for_group[ss.row.group];
       ancestor_id_for_group[ss.row.group] = ss.row.id;
 
       const auto* start = helper.ChildrenForIdBegin(ss.row.id);
diff --git a/src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql b/src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql
index 44cd8ce..d406aea 100644
--- a/src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql
@@ -22,30 +22,9 @@
 --
 -- Example:
 -- Input
---   id | parent_id | group_key
---   ---+-----------+--------
---   1  | NULL      | 1
---   2  | 1         | 1
---   3  | 2         | 2
---   4  | 2         | 2
---   5  | 4         | 1
---   6  | 4         | 3
---   7  | 4         | 2
 --
--- Or as a graph:
---         1 (1)
---        /
---       2 (1)
---      /  \
---     3 (2) 4 (2)
---            \
---             5 (1)
---            /  \
---         6 (3) 7 (2)
---
--- Possible output (order of rows is implementation-defined)
 --   id | parent_id | group_key
---   ---+-----------+-------
+--   ---|-----------|----------
 --   1  | NULL      | 1
 --   2  | 1         | 1
 --   3  | NULL      | 2
@@ -53,13 +32,41 @@
 --   5  | 2         | 1
 --   6  | NULL      | 3
 --   7  | 4         | 2
+--   8  | 4         | 1
+--
+-- Or as a graph:
+-- ```
+--         1 (1)
+--        /
+--       2 (1)
+--      /  \
+--     3 (2) 4 (2)
+--           /   \
+--         5 (1) 8 (1)
+--        /  \
+--     6 (3) 7 (2)
+-- ```
+-- Possible output (order of rows is implementation-defined)
+--
+--   id | parent_id | group_key
+--   ---|-----------|-------
+--   1  | NULL      | 1
+--   2  | 1         | 1
+--   3  | NULL      | 2
+--   4  | NULL      | 2
+--   5  | 2         | 1
+--   6  | NULL      | 3
+--   7  | 4         | 2
+--   8  | 2         | 1
 --
 -- Or as a forest:
---    1 (1)       3 (2)      4 (2)        6 (3)
---     |                      |
---    2 (1)                  7 (2)
---     |
---    5 (1)
+-- ```
+--     1 (1)       3 (2)      4 (2)        6 (3)
+--      |                      |
+--     2 (1)                  7 (2)
+--     /   \
+--   5 (1) 8 (1)
+-- ```
 CREATE PERFETTO MACRO tree_structural_partition_by_group(
   -- A table/view/subquery corresponding to a tree which should be partitioned.
   -- This table must have the columns "id", "parent_id" and "group_key".
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/heap_graph_dominator_tree.sql b/src/trace_processor/perfetto_sql/stdlib/memory/heap_graph_dominator_tree.sql
index 28edf7f..f0cfae1 100644
--- a/src/trace_processor/perfetto_sql/stdlib/memory/heap_graph_dominator_tree.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/memory/heap_graph_dominator_tree.sql
@@ -18,20 +18,21 @@
 -- Excluding following types from the graph as they share objects' ownership
 -- with their real (more interesting) owners and will mask their idom to be the
 -- "super root".
-CREATE PERFETTO TABLE _excluded_type_ids AS
-WITH RECURSIVE class_visitor(type_id) AS (
-  SELECT id AS type_id
-  FROM heap_graph_class
-  WHERE name IN (
-    'java.lang.ref.PhantomReference',
-    'java.lang.ref.FinalizerReference'
-  )
-  UNION ALL
-  SELECT child.id AS type_id
-  FROM heap_graph_class child
-  JOIN class_visitor parent ON parent.type_id = child.superclass_id
-)
-SELECT * FROM class_visitor;
+CREATE PERFETTO TABLE _ref_type_ids AS
+SELECT id AS type_id FROM heap_graph_class
+WHERE kind IN (
+  'KIND_FINALIZER_REFERENCE',
+  'KIND_PHANTOM_REFERENCE',
+  'KIND_SOFT_REFERENCE',
+  'KIND_WEAK_REFERENCE');
+
+CREATE PERFETTO TABLE _excluded_refs AS
+SELECT ref.id
+  FROM _ref_type_ids
+  JOIN heap_graph_object robj USING (type_id)
+  JOIN heap_graph_reference ref USING (reference_set_id)
+WHERE ref.field_name = 'java.lang.ref.Reference.referent'
+ORDER BY ref.id;
 
 -- The assigned id of the "super root".
 -- Since a Java heap graph is a "forest" structure, we need to add a imaginary
@@ -48,7 +49,8 @@
   ref.owned_id AS dest_node_id
 FROM heap_graph_reference ref
 JOIN heap_graph_object source_node ON ref.owner_id = source_node.id
-WHERE source_node.reachable AND source_node.type_id NOT IN _excluded_type_ids
+WHERE source_node.reachable
+  AND ref.id NOT IN _excluded_refs
   AND ref.owned_id IS NOT NULL
 UNION ALL
 SELECT
diff --git a/src/trace_redaction/redact_sched_switch.cc b/src/trace_redaction/redact_sched_switch.cc
index 2f85efe..55777f1 100644
--- a/src/trace_redaction/redact_sched_switch.cc
+++ b/src/trace_redaction/redact_sched_switch.cc
@@ -88,13 +88,17 @@
     switch (field.id()) {
       case protos::pbzero::SchedSwitchFtraceEvent::kNextCommFieldNumber:
         if (next_slice.uid == context.package_uid) {
-          proto_util::AppendField(field, sched_switch_message);
+          sched_switch_message->set_next_comm(field.as_string());
+        } else {
+          sched_switch_message->set_next_comm("");
         }
         break;
 
       case protos::pbzero::SchedSwitchFtraceEvent::kPrevCommFieldNumber:
         if (prev_slice.uid == context.package_uid) {
-          proto_util::AppendField(field, sched_switch_message);
+          sched_switch_message->set_prev_comm(field.as_string());
+        } else {
+          sched_switch_message->set_prev_comm("");
         }
         break;
 
diff --git a/src/trace_redaction/redact_sched_switch_integrationtest.cc b/src/trace_redaction/redact_sched_switch_integrationtest.cc
index 3b3523f..67948f2 100644
--- a/src/trace_redaction/redact_sched_switch_integrationtest.cc
+++ b/src/trace_redaction/redact_sched_switch_integrationtest.cc
@@ -175,18 +175,19 @@
       const auto* next_comm = expected_names.Find(next_pid);
       const auto* prev_comm = expected_names.Find(prev_pid);
 
+      EXPECT_TRUE(sched_decoder.has_next_comm());
+      EXPECT_TRUE(sched_decoder.has_prev_comm());
+
       if (next_comm) {
-        EXPECT_TRUE(sched_decoder.has_next_comm());
         EXPECT_EQ(sched_decoder.next_comm().ToStdString(), *next_comm);
       } else {
-        EXPECT_FALSE(sched_decoder.has_next_comm());
+        EXPECT_EQ(sched_decoder.next_comm().size, 0u);
       }
 
       if (prev_comm) {
-        EXPECT_TRUE(sched_decoder.has_prev_comm());
         EXPECT_EQ(sched_decoder.prev_comm().ToStdString(), *prev_comm);
       } else {
-        EXPECT_FALSE(sched_decoder.has_prev_comm());
+        EXPECT_EQ(sched_decoder.prev_comm().size, 0u);
       }
     }
   }
diff --git a/src/trace_redaction/redact_sched_switch_unittest.cc b/src/trace_redaction/redact_sched_switch_unittest.cc
index 174a35f..f6a4c13 100644
--- a/src/trace_redaction/redact_sched_switch_unittest.cc
+++ b/src/trace_redaction/redact_sched_switch_unittest.cc
@@ -108,7 +108,7 @@
   ASSERT_FALSE(result.ok());
 }
 
-TEST_F(RedactSchedSwitchTest, ClearsPrevAndNext) {
+TEST_F(RedactSchedSwitchTest, ReplacePrevAndNextWithEmptyStrings) {
   RedactSchedSwitch redact;
 
   Context context;
@@ -131,15 +131,15 @@
 
   ASSERT_TRUE(event.has_sched_switch());
 
-  // Pid should always carry over; only the comm value should get removed.
-  ASSERT_TRUE(event.sched_switch().has_next_pid());
-  ASSERT_FALSE(event.sched_switch().has_next_comm());
+  // Cleared prev and next comm.
+  ASSERT_TRUE(event.sched_switch().has_prev_comm());
+  ASSERT_TRUE(event.sched_switch().prev_comm().empty());
 
-  ASSERT_TRUE(event.sched_switch().has_prev_pid());
-  ASSERT_FALSE(event.sched_switch().has_prev_comm());
+  ASSERT_TRUE(event.sched_switch().has_next_comm());
+  ASSERT_TRUE(event.sched_switch().next_comm().empty());
 }
 
-TEST_F(RedactSchedSwitchTest, ClearsPrev) {
+TEST_F(RedactSchedSwitchTest, ReplacePrevWithEmptyStrings) {
   RedactSchedSwitch redact;
 
   Context context;
@@ -162,15 +162,15 @@
 
   ASSERT_TRUE(event.has_sched_switch());
 
-  // Pid should always carry over; only the comm value should get removed.
-  ASSERT_TRUE(event.sched_switch().has_next_pid());
-  ASSERT_TRUE(event.sched_switch().has_next_comm());
+  // Only cleared the prev comm.
+  ASSERT_TRUE(event.sched_switch().has_prev_comm());
+  ASSERT_TRUE(event.sched_switch().prev_comm().empty());
 
-  ASSERT_TRUE(event.sched_switch().has_prev_pid());
-  ASSERT_FALSE(event.sched_switch().has_prev_comm());
+  ASSERT_TRUE(event.sched_switch().has_next_comm());
+  ASSERT_FALSE(event.sched_switch().next_comm().empty());
 }
 
-TEST_F(RedactSchedSwitchTest, ClearNext) {
+TEST_F(RedactSchedSwitchTest, ReplaceNextWithEmptyStrings) {
   RedactSchedSwitch redact;
 
   Context context;
@@ -193,12 +193,12 @@
 
   ASSERT_TRUE(event.has_sched_switch());
 
-  // Pid should always carry over; only the comm value should get removed.
-  ASSERT_TRUE(event.sched_switch().has_next_pid());
-  ASSERT_FALSE(event.sched_switch().has_next_comm());
-
-  ASSERT_TRUE(event.sched_switch().has_prev_pid());
   ASSERT_TRUE(event.sched_switch().has_prev_comm());
+  ASSERT_FALSE(event.sched_switch().prev_comm().empty());
+
+  // Only cleared the next comm.
+  ASSERT_TRUE(event.sched_switch().has_next_comm());
+  ASSERT_TRUE(event.sched_switch().next_comm().empty());
 }
 
 }  // namespace perfetto::trace_redaction
diff --git a/test/data/chrome_input_with_frame_view.pftrace.sha256 b/test/data/chrome_input_with_frame_view.pftrace.sha256
index ea5a606..d0943a8 100644
--- a/test/data/chrome_input_with_frame_view.pftrace.sha256
+++ b/test/data/chrome_input_with_frame_view.pftrace.sha256
@@ -1 +1 @@
-1e4e1b7098c3c1b900d31fa6d6791e7b022e85ecebbb560123ce7139b3f82231
\ No newline at end of file
+a93548822e481508c728ccc5da3ad34afcd0aec02ca7a7a4dad84ff340ee5975
\ No newline at end of file
diff --git a/test/trace_processor/diff_tests/metrics/chrome/tests_scroll_jank.py b/test/trace_processor/diff_tests/metrics/chrome/tests_scroll_jank.py
index 70dec5c..fa52426 100644
--- a/test/trace_processor/diff_tests/metrics/chrome/tests_scroll_jank.py
+++ b/test/trace_processor/diff_tests/metrics/chrome/tests_scroll_jank.py
@@ -429,32 +429,18 @@
         query=Metric('chrome_scroll_jank_v3'),
         out=TextProto(r"""
         [perfetto.protos.chrome_scroll_jank_v3] {
-          trace_num_frames: 291
-          trace_num_janky_frames: 3
-          trace_scroll_jank_percentage: 1.0309278350515463
-          vsync_interval_ms: 16.368
+          trace_num_frames: 354
+          trace_num_janky_frames: 1
+          trace_scroll_jank_percentage: 0.2824858757062147
+          vsync_interval_ms: 10.483
           scrolls {
-            num_frames: 105
-            num_janky_frames: 2
-            scroll_jank_percentage: 1.9047619047619047
-            max_delay_since_last_frame: 6.126221896383187
-            scroll_jank_causes {
-              cause: "RendererCompositorQueueingDelay"
-              delay_since_last_frame: 2.044354838709678
-            }
-            scroll_jank_causes {
-              cause: "RendererCompositorFinishedToBeginImplFrame"
-              delay_since_last_frame: 6.126221896383187
-            }
-          }
-          scrolls {
-            num_frames: 84
+            num_frames: 122
             num_janky_frames: 1
-            scroll_jank_percentage: 1.1904761904761905
-            max_delay_since_last_frame: 2.040811339198436
+            scroll_jank_percentage: 0.819672131147541
+            max_delay_since_last_frame: 2.13021081751407
             scroll_jank_causes {
               cause: "RendererCompositorQueueingDelay"
-              delay_since_last_frame: 2.040811339198436
+              delay_since_last_frame: 2.13021081751407
             }
           }
         }
@@ -469,8 +455,8 @@
         INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
 
         SELECT
-          _HAS_DESCENDANT_SLICE_WITH_NAME(
-            (SELECT id from slice where dur = 46046000),
+          HAS_DESCENDANT_SLICE_WITH_NAME(
+            (SELECT id from slice where dur = 60156000),
             'SwapEndToPresentationCompositorFrame') AS has_descendant;
         """,
         out=Csv("""
@@ -487,8 +473,8 @@
         INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
 
         SELECT
-          _HAS_DESCENDANT_SLICE_WITH_NAME(
-            (SELECT id from slice where dur = 11666000),
+          HAS_DESCENDANT_SLICE_WITH_NAME(
+            (SELECT id from slice where dur = 77247000),
             'SwapEndToPresentationCompositorFrame') AS has_descendant;
         """,
         out=Csv("""
@@ -506,7 +492,7 @@
 
         SELECT
           _DESCENDANT_SLICE_END(
-            (SELECT id from slice where dur = 11666000),
+            (SELECT id from slice where dur = 77247000),
             'SwapEndToPresentationCompositorFrame') AS end_ts;
         """,
         out=Csv("""
@@ -524,10 +510,10 @@
 
         SELECT
           _DESCENDANT_SLICE_END(
-            (SELECT id from slice where dur = 46046000),
+            (SELECT id from slice where dur = 60156000),
             'SwapEndToPresentationCompositorFrame') AS end_ts;
         """,
         out=Csv("""
         "end_ts"
-        174797566610797
+        1035869424631926
         """))
\ No newline at end of file
diff --git a/test/trace_processor/diff_tests/parser/parsing/chrome_metadata.out b/test/trace_processor/diff_tests/parser/parsing/chrome_metadata.out
index 5635f53..7c32d9a 100644
--- a/test/trace_processor/diff_tests/parser/parsing/chrome_metadata.out
+++ b/test/trace_processor/diff_tests/parser/parsing/chrome_metadata.out
@@ -1,5 +1,5 @@
 "id","type","name","key_type","int_value","str_value"
-0,"metadata","trace_uuid","single","[NULL]","00000000-0000-0000-afe8-083bd6899d9d"
+0,"metadata","trace_uuid","single","[NULL]","00000000-0000-0000-7f42-b235fa358661"
 1,"metadata","trace_time_clock_id","single",6,"[NULL]"
 2,"metadata","cr-a-playstore_version_code","single",101,"[NULL]"
 3,"metadata","cr-a-enabled_categories","single","[NULL]","cat1,cat2,cat3"
@@ -7,5 +7,5 @@
 5,"metadata","cr-background_tracing_metadata","single","[NULL]","CgUlDsAbXx2RziSz"
 6,"metadata","cr-scenario_name_hash","single",3005533841,"[NULL]"
 7,"metadata","cr-triggered_rule_name_hash","single",1595654158,"[NULL]"
-8,"metadata","trace_size_bytes","single",78,"[NULL]"
+8,"metadata","trace_size_bytes","single",95,"[NULL]"
 9,"metadata","trace_type","single","[NULL]","proto"
diff --git a/test/trace_processor/diff_tests/parser/parsing/tests.py b/test/trace_processor/diff_tests/parser/parsing/tests.py
index 505bdf9..91d2f6e 100644
--- a/test/trace_processor/diff_tests/parser/parsing/tests.py
+++ b/test/trace_processor/diff_tests/parser/parsing/tests.py
@@ -632,6 +632,13 @@
           timestamp: 101000002
         }
         packet {
+          chrome_trigger {
+            trigger_name_hash: 1595654158
+          }
+          trusted_packet_sequence_id: 1
+          timestamp: 101000002
+        }
+        packet {
           trusted_packet_sequence_id: 1
           timestamp: 101000002
           chrome_metadata {
diff --git a/test/trace_processor/diff_tests/stdlib/graphs/partition_tests.py b/test/trace_processor/diff_tests/stdlib/graphs/partition_tests.py
index 5dab718..0ad780e 100644
--- a/test/trace_processor/diff_tests/stdlib/graphs/partition_tests.py
+++ b/test/trace_processor/diff_tests/stdlib/graphs/partition_tests.py
@@ -38,7 +38,8 @@
             (4, 2,    2),
             (5, 4,    1),
             (6, 4,    3),
-            (7, 4,    2)
+            (7, 4,    2),
+            (8, 4,    1)
           )
           SELECT * FROM data;
 
@@ -55,4 +56,5 @@
         5,2,1
         6,"[NULL]",3
         7,4,2
+        8,2,1
         """))
diff --git a/test/trace_processor/diff_tests/stdlib/memory/heap_graph_dominator_tree_tests.py b/test/trace_processor/diff_tests/stdlib/memory/heap_graph_dominator_tree_tests.py
index 52bb465..0aa0f64 100644
--- a/test/trace_processor/diff_tests/stdlib/memory/heap_graph_dominator_tree_tests.py
+++ b/test/trace_processor/diff_tests/stdlib/memory/heap_graph_dominator_tree_tests.py
@@ -65,7 +65,7 @@
           19,14,1,128,4,"U"
           20,14,1,256,4,"V"
           21,14,1,512,4,"W"
-          23,25,1,1024,1,"java.lang.ref.FinalizerReference"
+          23,25,1,1024,1,"sun.misc.Cleaner"
         """))
 
   def test_heap_graph_super_root_fn(self):
diff --git a/test/trace_processor/diff_tests/stdlib/memory/heap_graph_for_dominator_tree.textproto b/test/trace_processor/diff_tests/stdlib/memory/heap_graph_for_dominator_tree.textproto
index b0de072..940b38f 100644
--- a/test/trace_processor/diff_tests/stdlib/memory/heap_graph_for_dominator_tree.textproto
+++ b/test/trace_processor/diff_tests/stdlib/memory/heap_graph_for_dominator_tree.textproto
@@ -26,7 +26,7 @@
   # 2. a synthetic tree whose dominator tree is itself. It's drawn below with
   #    each object represented by it's class name. Number in the bracket is the
   #    size of each node in bytes.
-  #                 S[1]     java.lang.ref.FinalizerReference[1024]
+  #                 S[1]     sun.misc.Cleaner[1024]
   #                /    \    /
   #            M[2]      N[4]
   #           /   \      /   \
@@ -191,7 +191,7 @@
     }
     objects {
       id: 0x18
-      type_id: 24 # "java.lang.ref.FinalizerReference"
+      type_id: 24 # "sun.misc.Cleaner"
       self_size: 1024
       reference_object_id: 0x0e
     }
@@ -335,9 +335,15 @@
     }
     types {
       id: 24
-      class_name: "java.lang.ref.FinalizerReference"
+      class_name: "sun.misc.Cleaner"
+      kind: KIND_PHANTOM_REFERENCE
+      reference_field_id: 123
       location_id: 1
     }
+    field_names {
+      iid: 123
+      str: "java.lang.ref.Reference.referent"
+    }
     continued: false
     index: 1
   }
diff --git a/test/trace_processor/diff_tests/stdlib/slices/tests.py b/test/trace_processor/diff_tests/stdlib/slices/tests.py
index 053dabf..747ffb2 100644
--- a/test/trace_processor/diff_tests/stdlib/slices/tests.py
+++ b/test/trace_processor/diff_tests/stdlib/slices/tests.py
@@ -76,21 +76,23 @@
 
         SELECT e.name, e.ts, e.dur, e.depth
         FROM _slice_flattened e
-        JOIN thread_track ON e.track_id = thread_track.id
-        JOIN thread USING(utid)
-        WHERE thread.tid = 30944;
+          JOIN thread_track ON e.track_id = thread_track.id
+          JOIN thread USING(utid)
+        WHERE thread.tid = 30196
+        LIMIT 10;
       """,
         out=Csv("""
         "name","ts","dur","depth"
-        "ThreadControllerImpl::RunTask",174793737042797,3937000,0
-        "ThreadControllerImpl::RunTask",174793741016797,5930000,0
-        "ThreadControllerImpl::RunTask",174793747000797,47000,0
-        "Receive mojo message",174793747047797,136000,1
-        "ThreadControllerImpl::RunTask",174793747183797,17000,0
-        "Looper.dispatch: android.os.Handler(Kx3@57873a8)",174793747546797,119000,0
-        "ThreadControllerImpl::RunTask",174796099970797,186000,0
-        "Looper.dispatch: jy3(null)",174800056530797,1368000,0
-        "ThreadControllerImpl::RunTask",174800107962797,132000,0
+        "EventForwarder::OnTouchEvent",1035865509936036,211000,0
+        "EventForwarder::OnTouchEvent",1035865510234036,48000,0
+        "EventForwarder::OnTouchEvent",1035865510673036,10000,0
+        "GestureProvider::OnTouchEvent",1035865510147036,87000,1
+        "RenderWidgetHostImpl::ForwardTouchEvent",1035865510282036,41000,1
+        "RenderWidgetHostImpl::ForwardTouchEvent",1035865510331036,16000,1
+        "RenderWidgetHostImpl::ForwardTouchEvent",1035865510670036,3000,1
+        "LatencyInfo.Flow",1035865510323036,8000,2
+        "PassthroughTouchEventQueue::QueueEvent",1035865510347036,30000,2
+        "PassthroughTouchEventQueue::QueueEvent",1035865510666036,4000,2
       """))
 
   def test_thread_slice_cpu_time(self):
diff --git a/tools/cpu_profile b/tools/cpu_profile
index 17f0996..ece5095 100755
--- a/tools/cpu_profile
+++ b/tools/cpu_profile
@@ -219,6 +219,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -235,28 +238,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/tools/gen_android_bp b/tools/gen_android_bp
index d71b0dc..c8a2b1a 100755
--- a/tools/gen_android_bp
+++ b/tools/gen_android_bp
@@ -768,7 +768,6 @@
   # The .proto filegroup will be added to `tool_files` of rdeps so that the
   # genrules can be sandboxed.
 
-  tool_files = set()
   for proto_dep in target.proto_deps().union(target.transitive_proto_deps()):
     tool_files.add(":" + label_to_module_name(proto_dep.name))
 
diff --git a/tools/gen_tp_table_headers.py b/tools/gen_tp_table_headers.py
index f15245e..91e4cdb 100755
--- a/tools/gen_tp_table_headers.py
+++ b/tools/gen_tp_table_headers.py
@@ -72,7 +72,8 @@
   ]
   headers: Dict[str, Header] = {}
   for table in parse_tables_from_modules(modules):
-    input_path = os.path.relpath(table.table.python_module, ROOT_DIR)
+    raw_path = table.table.python_module
+    input_path = raw_path[raw_path.rfind('/src') + 1:]
     header = headers.get(input_path, Header([]))
     header.tables.append(table)
     headers[input_path] = header
diff --git a/tools/gn_utils.py b/tools/gn_utils.py
index d0417d7..904760e 100644
--- a/tools/gn_utils.py
+++ b/tools/gn_utils.py
@@ -529,9 +529,8 @@
     return metadata.get('exports', [])
 
   def get_proto_paths(self, proto_desc):
-    # import_dirs in metadata will be available for source_set targets.
     metadata = proto_desc.get('metadata', {})
-    return metadata.get('import_dirs', [])
+    return metadata.get('proto_import_dirs', [])
 
   def get_proto_target_type(self, target: Target
                            ) -> Tuple[Optional[str], Optional[Dict]]:
diff --git a/tools/heap_profile b/tools/heap_profile
index 700cfeb..eec9508 100755
--- a/tools/heap_profile
+++ b/tools/heap_profile
@@ -216,6 +216,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -232,28 +235,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/tools/record_android_trace b/tools/record_android_trace
index ceb0808..f2a48c9 100755
--- a/tools/record_android_trace
+++ b/tools/record_android_trace
@@ -201,6 +201,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -217,28 +220,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/tools/trace_processor b/tools/trace_processor
index 0917f54..972c9af 100755
--- a/tools/trace_processor
+++ b/tools/trace_processor
@@ -212,6 +212,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -228,28 +231,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/tools/tracebox b/tools/tracebox
index a9a89e6..144df3a 100755
--- a/tools/tracebox
+++ b/tools/tracebox
@@ -198,6 +198,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -214,28 +217,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/tools/traceconv b/tools/traceconv
index 8ba7114..817022c 100755
--- a/tools/traceconv
+++ b/tools/traceconv
@@ -212,6 +212,9 @@
 import platform
 import subprocess
 import sys
+import threading
+
+DOWNLOAD_LOCK = threading.Lock()
 
 
 def download_or_get_cached(file_name, url, sha256):
@@ -228,28 +231,36 @@
   sha256_path = os.path.join(dir, file_name + '.sha256')
   needs_download = True
 
-  # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
-  # download is cached into file_name.sha256, just check if that matches.
-  if os.path.exists(bin_path) and os.path.exists(sha256_path):
-    with open(sha256_path, 'rb') as f:
-      digest = f.read().decode()
-      if digest == sha256:
-        needs_download = False
+  try:
+    # In BatchTraceProcessor, many threads can be trying to execute the below
+    # code in parallel. For this reason, protect the whole operation with a
+    # lock.
+    DOWNLOAD_LOCK.acquire()
 
-  if needs_download:
-    # Either the filed doesn't exist or the SHA256 doesn't match.
-    tmp_path = bin_path + '.tmp'
-    print('Downloading ' + url)
-    subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
-    with open(tmp_path, 'rb') as fd:
-      actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
-    if actual_sha256 != sha256:
-      raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
-                      (url, actual_sha256, sha256))
-    os.chmod(tmp_path, 0o755)
-    os.replace(tmp_path, bin_path)
-    with open(sha256_path, 'w') as f:
-      f.write(sha256)
+    # Avoid recomputing the SHA-256 on each invocation. The SHA-256 of the last
+    # download is cached into file_name.sha256, just check if that matches.
+    if os.path.exists(bin_path) and os.path.exists(sha256_path):
+      with open(sha256_path, 'rb') as f:
+        digest = f.read().decode()
+        if digest == sha256:
+          needs_download = False
+
+    if needs_download:
+      # Either the file doesn't exist or the SHA256 doesn't match.
+      tmp_path = bin_path + '.tmp'
+      print('Downloading ' + url)
+      subprocess.check_call(['curl', '-f', '-L', '-#', '-o', tmp_path, url])
+      with open(tmp_path, 'rb') as fd:
+        actual_sha256 = hashlib.sha256(fd.read()).hexdigest()
+      if actual_sha256 != sha256:
+        raise Exception('Checksum mismatch for %s (actual: %s, expected: %s)' %
+                        (url, actual_sha256, sha256))
+      os.chmod(tmp_path, 0o755)
+      os.replace(tmp_path, bin_path)
+      with open(sha256_path, 'w') as f:
+        f.write(sha256)
+  finally:
+    DOWNLOAD_LOCK.release()
   return bin_path
 
 
diff --git a/ui/src/assets/details.scss b/ui/src/assets/details.scss
index d09690a..ff66708 100644
--- a/ui/src/assets/details.scss
+++ b/ui/src/assets/details.scss
@@ -446,112 +446,46 @@
   header.stale {
     color: grey;
   }
+}
 
-  .rows {
-    position: relative;
-    direction: ltr;
-    width: 100%;
+.pf-ftrace-explorer {
+  height: 100%;
+  font-size: 11px;
+  font-family: var(--monospace-font);
 
-    .row {
-      @include transition();
-      position: absolute;
-      width: 100%;
-      height: 20px;
-      line-height: 20px;
-      background-color: hsl(214, 22%, 100%);
+  .colour {
+    display: inline-block;
+    height: 10px;
+    width: 10px;
+    margin-right: 4px;
+  }
+}
 
-      &.D {
-        color: hsl(122, 20%, 40%);
-      }
-      &.V {
-        color: hsl(122, 20%, 30%);
-      }
-      &.I {
-        color: hsl(0, 0%, 20%);
-      }
-      &.W {
-        color: hsl(45, 60%, 45%);
-      }
-      &.E {
-        color: hsl(4, 90%, 58%);
-      }
-      &.F {
-        color: hsl(291, 64%, 42%);
-      }
-      &.stale {
-        color: #aaa;
-      }
-      &:nth-child(even) {
-        background-color: hsl(214, 22%, 95%);
-      }
-      &:hover {
-        background-color: $table-hover-color;
-      }
-      .cell {
-        font-size: 11px;
-        font-family: var(--monospace-font);
-        white-space: nowrap;
-        overflow: scroll;
-        padding-left: 10px;
-        padding-right: 10px;
-        display: inline-block;
-        &:first-child {
-          padding-left: 5px;
-        }
-        &:last-child {
-          padding-right: 5px;
-        }
-        &:only-child {
-          width: 100%;
-        }
+.pf-android-logs-table {
+  height: 100%;
+  font-size: 11px;
+  font-family: var(--monospace-font);
 
-        // The following children will be used as columns in the table showing
-        // Android logs.
-
-        // 1.Timestamp
-        &:nth-child(1) {
-          width: 7rem;
-          text-overflow: clip;
-          text-align: right;
-        }
-        // 2.Level
-        &:nth-child(2) {
-          width: 4rem;
-        }
-        // 3.Tag
-        &:nth-child(3) {
-          width: 13rem;
-        }
-
-        &.with-process {
-          // 4.Process name
-          &:nth-child(4) {
-            width: 18rem;
-          }
-          // 5.Message - a long string, will take most of the display space.
-          &:nth-child(5) {
-            width: calc(100% - 42rem);
-          }
-        }
-
-        &.no-process {
-          // 4.Message - a long string, will take most of the display space.
-          &:nth-child(4) {
-            width: calc(100% - 24rem);
-          }
-        }
-
-        &.row-header {
-          text-align: left;
-          font-weight: bold;
-          font-size: 13px;
-        }
-
-        &.row-header:first-child {
-          padding-left: 15px;
-        }
-      }
-    }
+  .D {
+    color: hsl(122, 20%, 40%);
+  }
+  .V {
+    color: hsl(122, 20%, 30%);
+  }
+  .I {
+    color: hsl(0, 0%, 20%);
+  }
+  .W {
+    color: hsl(45, 60%, 45%);
+  }
+  .E {
+    color: hsl(4, 90%, 58%);
+  }
+  .F {
+    color: hsl(291, 64%, 42%);
+  }
+  .pf-highlighted {
+    background: #d2efe0;
   }
 }
 
@@ -559,109 +493,6 @@
   margin: 10px;
 }
 
-.ftrace-panel {
-  display: contents;
-
-  .sticky {
-    position: sticky;
-    top: 0;
-    left: 0;
-    z-index: 1;
-    background-color: white;
-    color: #3c4b5d;
-    padding: 5px 10px;
-    display: grid;
-    grid-template-columns: auto auto;
-    justify-content: space-between;
-  }
-
-  .ftrace-rows-label {
-    display: flex;
-    align-items: center;
-  }
-
-  header.stale {
-    color: grey;
-  }
-
-  .rows {
-    position: relative;
-    direction: ltr;
-    min-width: 100%;
-    font-size: 12px;
-
-    .row {
-      @include transition();
-      position: absolute;
-      min-width: 100%;
-      line-height: 20px;
-      background-color: hsl(214, 22%, 100%);
-      white-space: nowrap;
-
-      &:nth-child(even) {
-        background-color: hsl(214, 22%, 95%);
-      }
-
-      &:hover {
-        background-color: $table-hover-color;
-      }
-
-      .cell {
-        font-family: var(--monospace-font);
-        white-space: nowrap;
-        overflow: hidden;
-        text-overflow: ellipsis;
-        margin-right: 8px;
-        display: inline-block;
-
-        .colour {
-          display: inline-block;
-          height: 10px;
-          width: 10px;
-          margin-right: 4px;
-        }
-
-        &:first-child {
-          margin-left: 8px;
-        }
-
-        &:last-child {
-          margin-right: 8px;
-        }
-
-        &:only-child {
-          width: 100%;
-        }
-
-        // Timestamp
-        &:nth-child(1) {
-          width: 13em;
-          // text-align: right;
-        }
-
-        // Name
-        &:nth-child(2) {
-          width: 24em;
-        }
-
-        // CPU
-        &:nth-child(3) {
-          width: 3em;
-        }
-
-        // Process
-        &:nth-child(4) {
-          width: 24em;
-        }
-
-        &.row-header {
-          font-weight: bold;
-        }
-      }
-    }
-  }
-}
-
 .screenshot-panel {
   height: 100%;
   img {
diff --git a/ui/src/assets/perfetto.scss b/ui/src/assets/perfetto.scss
index 8bb1cdc..567deae 100644
--- a/ui/src/assets/perfetto.scss
+++ b/ui/src/assets/perfetto.scss
@@ -56,3 +56,4 @@
 @import "widgets/hotkey";
 @import "widgets/text_paragraph";
 @import "widgets/treetable";
+@import "widgets/virtual_table";
diff --git a/ui/src/assets/widgets/virtual_table.scss b/ui/src/assets/widgets/virtual_table.scss
new file mode 100644
index 0000000..acd22d5
--- /dev/null
+++ b/ui/src/assets/widgets/virtual_table.scss
@@ -0,0 +1,89 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+@use "sass:math";
+@import "theme";
+
+// Adding these to a new layer makes other rules take precedence
+@layer widgets {
+  .pf-vtable {
+    overflow: auto;
+    font-family: $pf-font;
+    position: relative;
+    background: white; // Performance tweak - see b/335451611
+
+    .pf-vtable-content {
+      display: inline-flex;
+      flex-direction: column;
+      min-width: 100%;
+
+      .pf-vtable-header {
+        font-weight: bold;
+        position: sticky;
+        top: 0;
+        z-index: 1;
+        background: white;
+        white-space: nowrap;
+        padding-inline: 4px;
+
+        // A shadow improves distinction between header and content
+        box-shadow: #0001 0px 0px 8px;
+      }
+
+      .pf-vtable-data {
+        white-space: nowrap;
+        overflow: hidden;
+        text-overflow: ellipsis;
+        margin-right: 8px;
+        display: inline-block;
+      }
+
+      .pf-vtable-slider {
+        overflow: hidden;
+
+        // Necessary trig because we have 45deg stripes
+        $pattern-density: 1px * math.sqrt(2);
+        $pattern-col: #ddd;
+        overflow: hidden;
+
+        background: repeating-linear-gradient(
+          -45deg,
+          $pattern-col,
+          $pattern-col $pattern-density,
+          white $pattern-density,
+          white $pattern-density * 2
+        );
+
+        .pf-vtable-puck {
+          .pf-vtable-row {
+            white-space: nowrap;
+            padding-inline: 4px;
+
+            &:nth-child(odd) {
+              background-color: hsl(214, 22%, 95%);
+            }
+
+            &:nth-child(even) {
+              background-color: white;
+            }
+
+            &:hover {
+              background-color: $table-hover-color;
+            }
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/ui/src/base/geom.ts b/ui/src/base/geom.ts
index f569400..5a20023 100644
--- a/ui/src/base/geom.ts
+++ b/ui/src/base/geom.ts
@@ -24,6 +24,11 @@
   readonly height: number;
 }
 
+export interface Vector {
+  readonly x: number;
+  readonly y: number;
+}
+
 export function intersectRects(a: Rect, b: Rect): Rect {
   return {
     top: Math.max(a.top, b.top),
@@ -57,3 +62,28 @@
     height: r.bottom - r.top,
   };
 }
+
+/**
+ * Return true if rect a contains rect b.
+ *
+ * @param a A rect.
+ * @param b Another rect.
+ * @returns True if rect a contains rect b, false otherwise.
+ */
+export function containsRect(a: Rect, b: Rect): boolean {
+  return !(
+    b.top < a.top ||
+    b.bottom > a.bottom ||
+    b.left < a.left ||
+    b.right > a.right
+  );
+}
+
+export function translateRect(a: Rect, b: Vector): Rect {
+  return {
+    top: a.top + b.y,
+    left: a.left + b.x,
+    bottom: a.bottom + b.y,
+    right: a.right + b.x,
+  };
+}
diff --git a/ui/src/common/flamegraph_util.ts b/ui/src/common/flamegraph_util.ts
index 0817ebf..acf2ee8 100644
--- a/ui/src/common/flamegraph_util.ts
+++ b/ui/src/common/flamegraph_util.ts
@@ -24,7 +24,7 @@
   id: 'showHeapGraphDominatorTree',
   name: 'Show heap graph dominator tree',
   description: 'Show dominated size and objects tabs in Java heap graph view.',
-  defaultValue: false,
+  defaultValue: true,
 });
 
 export function viewingOptions(profileType: ProfileType): Array<ViewingOption> {
diff --git a/ui/src/common/plugins.ts b/ui/src/common/plugins.ts
index ba60270..0750e2e 100644
--- a/ui/src/common/plugins.ts
+++ b/ui/src/common/plugins.ts
@@ -299,11 +299,20 @@
     },
 
     get tracks(): TrackRef[] {
-      return Object.values(globals.state.tracks).map((trackState) => {
+      const tracks = Object.values(globals.state.tracks);
+      const pinnedTracks = globals.state.pinnedTracks;
+      const groups = globals.state.trackGroups;
+      return tracks.map((trackState) => {
+        const group = trackState.trackGroup
+          ? groups[trackState.trackGroup]
+          : undefined;
         return {
           displayName: trackState.name,
           uri: trackState.uri,
           params: trackState.params,
+          key: trackState.key,
+          groupName: group?.name,
+          isPinned: pinnedTracks.includes(trackState.key),
         };
       });
     },
diff --git a/ui/src/core/default_plugins.ts b/ui/src/core/default_plugins.ts
index 583e6bc..a9f8395 100644
--- a/ui/src/core/default_plugins.ts
+++ b/ui/src/core/default_plugins.ts
@@ -32,6 +32,7 @@
   'dev.perfetto.BookmarkletApi',
   'dev.perfetto.CoreCommands',
   'dev.perfetto.LargeScreensPerf',
+  'dev.perfetto.RestorePinnedTrack',
   'perfetto.AndroidLog',
   'perfetto.Annotation',
   'perfetto.AsyncSlices',
@@ -48,7 +49,6 @@
   'perfetto.Frames',
   'perfetto.FtraceRaw',
   'perfetto.HeapProfile',
-  'perfetto.NullTrack',
   'perfetto.PerfSamplesProfile',
   'perfetto.PivotTable',
   'perfetto.ProcessSummary',
diff --git a/ui/src/core/timeline_cache.ts b/ui/src/core/timeline_cache.ts
index fe84876..a3ce96a 100644
--- a/ui/src/core/timeline_cache.ts
+++ b/ui/src/core/timeline_cache.ts
@@ -62,7 +62,12 @@
   static create(startNs: time, endNs: time, windowSizePx: number): CacheKey {
     const bucketNs =
       (endNs - startNs) / BigInt(Math.round(windowSizePx * BUCKETS_PER_PIXEL));
-    return new CacheKey(startNs, endNs, bucketNs, windowSizePx);
+    return new CacheKey(
+      startNs,
+      endNs,
+      BigintMath.max(1n, bucketNs),
+      windowSizePx,
+    );
   }
 
   private constructor(
diff --git a/ui/src/frontend/base_counter_track.ts b/ui/src/frontend/base_counter_track.ts
index caf2ef6..91b0939 100644
--- a/ui/src/frontend/base_counter_track.ts
+++ b/ui/src/frontend/base_counter_track.ts
@@ -30,6 +30,7 @@
 import {NewTrackArgs} from './track';
 import {CacheKey} from '../core/timeline_cache';
 import {featureFlags} from '../core/feature_flags';
+import {uuidv4Sql} from '../base/uuid';
 
 export const COUNTER_DEBUG_MENU_ITEMS = featureFlags.register({
   id: 'counterDebugMenuItems',
@@ -195,6 +196,7 @@
 export abstract class BaseCounterTrack implements Track {
   protected engine: EngineProxy;
   protected trackKey: string;
+  protected trackUuid = uuidv4Sql();
 
   // This is the over-skirted cached bounds:
   private countersKey: CacheKey = CacheKey.zero();
@@ -256,7 +258,7 @@
 
   constructor(args: BaseCounterTrackArgs) {
     this.engine = args.engine;
-    this.trackKey = args.trackKey.replaceAll('-', '_');
+    this.trackKey = args.trackKey;
     this.defaultOptions = args.options ?? {};
   }
 
@@ -449,6 +451,33 @@
 
   async onCreate(): Promise<void> {
     this.initState = await this.onInit();
+
+    const displayValueQuery = await this.engine.query(`
+        create virtual table ${this.getTableName()}
+        using __intrinsic_counter_mipmap((
+          SELECT
+            ts,
+            ${this.getValueExpression()} as value
+          FROM (${this.getSqlSource()})
+        ));
+
+        select
+          min_value as minDisplayValue,
+          max_value as maxDisplayValue
+        from ${this.getTableName()}(
+          trace_start(), trace_end(), trace_dur()
+        );
+      `);
+
+    const {minDisplayValue, maxDisplayValue} = displayValueQuery.firstRow({
+      minDisplayValue: NUM,
+      maxDisplayValue: NUM,
+    });
+
+    this.limits = {
+      minDisplayValue,
+      maxDisplayValue,
+    };
   }
 
   async onUpdate(): Promise<void> {
@@ -691,11 +720,14 @@
     this.hover = undefined;
   }
 
-  onDestroy(): void {
+  async onDestroy(): Promise<void> {
     if (this.initState) {
       this.initState.dispose();
       this.initState = undefined;
     }
+    if (this.engine.isAlive) {
+      await this.engine.query(`drop table if exists ${this.getTableName()}`);
+    }
   }
 
   // Compute the range of values to display and range label.
@@ -811,37 +843,11 @@
     }
   }
 
+  private getTableName(): string {
+    return `counter_${this.trackUuid}`;
+  }
+
   private async maybeRequestData(rawCountersKey: CacheKey) {
-    let limits = this.limits;
-    if (limits === undefined) {
-      const displayValueQuery = await this.engine.query(`
-        drop table if exists counter_${this.trackKey};
-
-        create virtual table counter_${this.trackKey}
-        using __intrinsic_counter_mipmap((
-          SELECT
-            ts,
-            ${this.getValueExpression()} as value
-          FROM (${this.getSqlSource()})
-        ));
-
-        select
-          min_value as minDisplayValue,
-          max_value as maxDisplayValue
-        from counter_${this.trackKey}(
-          trace_start(), trace_end(), trace_dur()
-        );
-      `);
-      const {minDisplayValue, maxDisplayValue} = displayValueQuery.firstRow({
-        minDisplayValue: NUM,
-        maxDisplayValue: NUM,
-      });
-      limits = this.limits = {
-        minDisplayValue,
-        maxDisplayValue,
-      };
-    }
-
     if (rawCountersKey.isCoveredBy(this.countersKey)) {
       return; // We have the data already, no need to re-query.
     }
@@ -859,7 +865,7 @@
         max_value as maxDisplayValue,
         last_ts as ts,
         last_value as lastDisplayValue
-      FROM counter_${this.trackKey}(
+      FROM ${this.getTableName()}(
         ${countersKey.start},
         ${countersKey.end},
         ${countersKey.bucketSize}
diff --git a/ui/src/frontend/base_slice_track.ts b/ui/src/frontend/base_slice_track.ts
index ca4fc41..329e441 100644
--- a/ui/src/frontend/base_slice_track.ts
+++ b/ui/src/frontend/base_slice_track.ts
@@ -15,7 +15,7 @@
 import {Disposable, NullDisposable} from '../base/disposable';
 import {assertExists} from '../base/logging';
 import {clamp, floatEqual} from '../base/math_utils';
-import {duration, Time, time} from '../base/time';
+import {Time, time} from '../base/time';
 import {exists} from '../base/utils';
 import {Actions} from '../common/actions';
 import {
@@ -41,6 +41,7 @@
 import {DEFAULT_SLICE_LAYOUT, SliceLayout} from './slice_layout';
 import {NewTrackArgs} from './track';
 import {BUCKETS_PER_PIXEL, CacheKey} from '../core/timeline_cache';
+import {uuidv4Sql} from '../base/uuid';
 
 // The common class that underpins all tracks drawing slices.
 
@@ -138,8 +139,10 @@
 // merges several tracks into one visual track.
 export const BASE_ROW = {
   id: NUM, // The slice ID, for selection / lookups.
-  ts: LONG, // Start time in nanoseconds.
-  dur: LONG, // Duration in nanoseconds. -1 = incomplete, 0 = instant.
+  ts: LONG, // True ts in nanoseconds.
+  dur: LONG, // True duration in nanoseconds. -1 = incomplete, 0 = instant.
+  tsQ: LONG, // Quantized start time in nanoseconds.
+  durQ: LONG, // Quantized duration in nanoseconds.
   depth: NUM, // Vertical depth.
 };
 
@@ -174,6 +177,7 @@
   protected sliceLayout: SliceLayout = {...DEFAULT_SLICE_LAYOUT};
   protected engine: EngineProxy;
   protected trackKey: string;
+  protected trackUuid = uuidv4Sql();
 
   // This is the over-skirted cached bounds:
   private slicesKey: CacheKey = CacheKey.zero();
@@ -307,6 +311,10 @@
     return `${size}px Roboto Condensed`;
   }
 
+  private getTableName(): string {
+    return `slice_${this.trackUuid}`;
+  }
+
   async onCreate(): Promise<void> {
     this.initState = await this.onInit();
 
@@ -328,7 +336,9 @@
       queryRes = await this.engine.query(`
           select
             ${this.depthColumn()},
+            ts as tsQ,
             ts,
+            -1 as durQ,
             -1 as dur,
             id
             ${extraCols ? ',' + extraCols : ''}
@@ -339,7 +349,9 @@
       queryRes = await this.engine.query(`
         select
           ${this.depthColumn()},
-          max(ts) as ts,
+          max(ts) as tsQ,
+          ts,
+          -1 as durQ,
           -1 as dur,
           id
           ${extraCols ? ',' + extraCols : ''}
@@ -357,7 +369,7 @@
     this.incomplete = incomplete;
 
     await this.engine.query(`
-      create virtual table slice_${this.trackKey}
+      create virtual table ${this.getTableName()}
       using __intrinsic_slice_mipmap((
         select id, ts, dur, ${this.depthColumn()}
         from (${this.getSqlSource()})
@@ -654,7 +666,9 @@
       this.initState.dispose();
       this.initState = undefined;
     }
-    await this.engine.execute(`drop table slice_${this.trackKey}`);
+    if (this.engine.isAlive) {
+      await this.engine.execute(`drop table ${this.getTableName()}`);
+    }
   }
 
   // This method figures out if the visible window is outside the bounds of
@@ -676,12 +690,14 @@
     const extraCols = this.extraSqlColumns.join(',');
     const queryRes = await this.engine.query(`
       SELECT
-        (z.ts / ${rawSlicesKey.bucketSize}) * ${rawSlicesKey.bucketSize} as ts,
-        iif(s.dur = -1, s.dur, max(z.dur, ${rawSlicesKey.bucketSize})) as dur,
+        (z.ts / ${rawSlicesKey.bucketSize}) * ${rawSlicesKey.bucketSize} as tsQ,
+        max(z.dur, ${rawSlicesKey.bucketSize}) as durQ,
+        s.ts as ts,
+        s.dur as dur,
         s.id,
         z.depth
         ${extraCols ? ',' + extraCols : ''}
-      FROM slice_${this.trackKey}(
+      FROM ${this.getTableName()}(
         ${slicesKey.start},
         ${slicesKey.end},
         ${slicesKey.bucketSize}
@@ -730,11 +746,6 @@
   }
 
   rowToSlice(row: T['row']): T['slice'] {
-    const startNs = Time.fromRaw(row.ts);
-    const endNs = Time.fromRaw(row.ts + row.dur);
-    const ts = Time.fromRaw(row.ts);
-    const dur: duration = row.dur;
-
     let flags = 0;
     if (row.dur === -1n) {
       flags |= SLICE_FLAGS_INCOMPLETE;
@@ -744,11 +755,11 @@
 
     return {
       id: row.id,
-      startNs,
-      endNs,
-      durNs: row.dur,
-      ts,
-      dur,
+      startNs: Time.fromRaw(row.tsQ),
+      endNs: Time.fromRaw(row.tsQ + row.durQ),
+      durNs: row.durQ,
+      ts: Time.fromRaw(row.ts),
+      dur: row.dur,
       flags,
       depth: row.depth,
       title: '',
diff --git a/ui/src/frontend/css_constants.ts b/ui/src/frontend/css_constants.ts
index 1a37c1e..f756136 100644
--- a/ui/src/frontend/css_constants.ts
+++ b/ui/src/frontend/css_constants.ts
@@ -23,7 +23,6 @@
 export let SELECTION_FILL_COLOR = '#8398e64d';
 export let OVERVIEW_TIMELINE_NON_VISIBLE_COLOR = '#c8c8c8cc';
 export let DEFAULT_DETAILS_CONTENT_HEIGHT = 280;
-export const SELECTED_LOG_ROWS_COLOR = '#D2EFE0';
 export let BACKGROUND_COLOR = '#ffffff';
 export let FOREGROUND_COLOR = '#222';
 export let COLLAPSED_BACKGROUND = '#ffffff';
diff --git a/ui/src/frontend/simple_counter_track.ts b/ui/src/frontend/simple_counter_track.ts
index 084c14f..361480b 100644
--- a/ui/src/frontend/simple_counter_track.ts
+++ b/ui/src/frontend/simple_counter_track.ts
@@ -17,6 +17,7 @@
 import {BaseCounterTrack, CounterOptions} from './base_counter_track';
 import {CounterColumns, SqlDataSource} from './debug_tracks';
 import {Disposable, DisposableCallback} from '../base/disposable';
+import {uuidv4Sql} from '../base/uuid';
 
 export type SimpleCounterTrackConfig = {
   data: SqlDataSource;
@@ -39,7 +40,7 @@
       options: config.options,
     });
     this.config = config;
-    this.sqlTableName = `__simple_counter_${this.trackKey}`;
+    this.sqlTableName = `__simple_counter_${uuidv4Sql()}`;
   }
 
   async onInit(): Promise<Disposable> {
@@ -74,7 +75,7 @@
 
   private async dropTrackTable(): Promise<void> {
     if (this.engine.isAlive) {
-      this.engine.query(`drop table if exists ${this.sqlTableName}`);
+      await this.engine.query(`drop table if exists ${this.sqlTableName}`);
     }
   }
 }
diff --git a/ui/src/frontend/widgets_page.ts b/ui/src/frontend/widgets_page.ts
index ea80972..65fb04e 100644
--- a/ui/src/frontend/widgets_page.ts
+++ b/ui/src/frontend/widgets_page.ts
@@ -50,6 +50,11 @@
 import {TableShowcase} from './tables/table_showcase';
 import {TreeTable, TreeTableAttrs} from './widgets/treetable';
 import {Intent} from '../widgets/common';
+import {
+  VirtualTable,
+  VirtualTableAttrs,
+  VirtualTableRow,
+} from '../widgets/virtual_table';
 
 const DATA_ENGLISH_LETTER_FREQUENCY = {
   table: [
@@ -569,6 +574,11 @@
   },
 ];
 
+let virtualTableData: {offset: number; rows: VirtualTableRow[]} = {
+  offset: 0,
+  rows: [],
+};
+
 export const WidgetsPage = createPage({
   view() {
     return m(
@@ -1154,10 +1164,38 @@
           return m(TreeTable<File>, attrs);
         },
       }),
+      m(WidgetShowcase, {
+        label: 'VirtualTable',
+        description: `Virtualized table for efficient rendering of large datasets`,
+        renderWidget: () => {
+          const attrs: VirtualTableAttrs = {
+            columns: [
+              {header: 'x', width: '4em'},
+              {header: 'x^2', width: '8em'},
+            ],
+            rows: virtualTableData.rows,
+            firstRowOffset: virtualTableData.offset,
+            rowHeight: 20,
+            numRows: 500_000,
+            style: {height: '200px'},
+            onReload: (rowOffset, rowCount) => {
+              const rows = [];
+              for (let i = rowOffset; i < rowOffset + rowCount; i++) {
+                rows.push({id: i, cells: [i, i ** 2]});
+              }
+              virtualTableData = {
+                offset: rowOffset,
+                rows,
+              };
+              raf.scheduleFullRedraw();
+            },
+          };
+          return m(VirtualTable, attrs);
+        },
+      }),
     );
   },
 });
-
 class ModalShowcase implements m.ClassComponent {
   private static counter = 0;
 
diff --git a/ui/src/plugins/dev.perfetto.AndroidCujs/index.ts b/ui/src/plugins/dev.perfetto.AndroidCujs/index.ts
index 2c13c76..aaef484 100644
--- a/ui/src/plugins/dev.perfetto.AndroidCujs/index.ts
+++ b/ui/src/plugins/dev.perfetto.AndroidCujs/index.ts
@@ -19,6 +19,7 @@
 const JANK_CUJ_QUERY_PRECONDITIONS = `
   SELECT RUN_METRIC('android/android_jank_cuj.sql');
   SELECT RUN_METRIC('android/jank/internal/counters.sql');
+  INCLUDE PERFETTO MODULE android.critical_blocking_calls;
 `;
 
 const JANK_CUJ_QUERY = `
@@ -64,15 +65,17 @@
       sf_callback_missed_frames,
       hwui_callback_missed_frames,
       cuj_layer.layer_name,
-      cuj.ts,
-      cuj.dur,
+      /* Boundaries table doesn't contain ts and dur when a CUJ didn't complete successfully.
+        In that case we still want to show that it was canceled, so let's take the slice timestamps. */
+      CASE WHEN boundaries.ts IS NOT NULL THEN boundaries.ts ELSE cuj.ts END AS ts,
+      CASE WHEN boundaries.dur IS NOT NULL THEN boundaries.dur ELSE cuj.dur END AS dur,
       cuj.track_id,
       cuj.slice_id
     FROM slice AS cuj
-           JOIN process_track AS pt
-                ON cuj.track_id = pt.id
+           JOIN process_track AS pt ON cuj.track_id = pt.id
            LEFT JOIN android_jank_cuj jc
                      ON pt.upid = jc.upid AND cuj.name = jc.cuj_slice_name AND cuj.ts = jc.ts
+           LEFT JOIN android_jank_cuj_main_thread_cuj_boundary boundaries using (cuj_id)
            LEFT JOIN android_jank_cuj_layer_name cuj_layer USING (cuj_id)
            LEFT JOIN android_jank_cuj_counter_metrics USING (cuj_id)
     WHERE cuj.name GLOB 'J<*>'
@@ -125,6 +128,42 @@
 `;
 
 const LATENCY_COLUMNS = ['name', 'dur_ms', 'ts', 'dur', 'track_id', 'slice_id'];
+
+const BLOCKING_CALLS_DURING_CUJS_QUERY = `
+    SELECT
+      s.id AS slice_id,
+      s.name,
+      max(s.ts, cuj.ts) AS ts,
+      min(s.ts + s.dur, cuj.ts_end) as ts_end,
+      min(s.ts + s.dur, cuj.ts_end) - max(s.ts, cuj.ts) AS dur,
+      cuj.cuj_id,
+      cuj.cuj_name,
+      s.process_name,
+      s.upid,
+      s.utid,
+      'slice' AS table_name
+    FROM _android_critical_blocking_calls s
+      JOIN  android_jank_cuj cuj
+      -- only when there is an overlap
+      ON s.ts + s.dur > cuj.ts AND s.ts < cuj.ts_end
+          -- and are from the same process
+          AND s.upid = cuj.upid
+`;
+
+const BLOCKING_CALLS_DURING_CUJS_COLUMNS = [
+  'slice_id',
+  'name',
+  'ts',
+  'cuj_ts',
+  'dur',
+  'cuj_id',
+  'cuj_name',
+  'process_name',
+  'upid',
+  'utid',
+  'table_name',
+];
+
 class AndroidCujs implements Plugin {
   async onTraceLoad(ctx: PluginContextTrace): Promise<void> {
     ctx.registerCommand({
@@ -140,7 +179,7 @@
             },
             'Jank CUJs',
             {ts: 'ts', dur: 'dur', name: 'name'},
-            [],
+            JANK_COLUMNS,
           );
         });
       },
@@ -179,6 +218,25 @@
       callback: () =>
         ctx.tabs.openQuery(LATENCY_CUJ_QUERY, 'Android Latency CUJs'),
     });
+
+    ctx.registerCommand({
+      id: 'dev.perfetto.AndroidCujs#PinBlockingCalls',
+      name: 'Add track: Android Blocking calls during CUJs',
+      callback: () => {
+        runQuery(JANK_CUJ_QUERY_PRECONDITIONS, ctx.engine).then(() =>
+          addDebugSliceTrack(
+            ctx.engine,
+            {
+              sqlSource: BLOCKING_CALLS_DURING_CUJS_QUERY,
+              columns: BLOCKING_CALLS_DURING_CUJS_COLUMNS,
+            },
+            'Blocking calls during CUJs',
+            {ts: 'ts', dur: 'dur', name: 'name'},
+            BLOCKING_CALLS_DURING_CUJS_COLUMNS,
+          ),
+        );
+      },
+    });
   }
 }
 
diff --git a/ui/src/plugins/dev.perfetto.RestorePinnedTracks/OWNERS b/ui/src/plugins/dev.perfetto.RestorePinnedTracks/OWNERS
new file mode 100644
index 0000000..987684d
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.RestorePinnedTracks/OWNERS
@@ -0,0 +1,2 @@
+nicomazz@google.com
+nickchameyev@google.com
diff --git a/ui/src/plugins/dev.perfetto.RestorePinnedTracks/index.ts b/ui/src/plugins/dev.perfetto.RestorePinnedTracks/index.ts
new file mode 100644
index 0000000..81036db
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.RestorePinnedTracks/index.ts
@@ -0,0 +1,135 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {
+  Plugin,
+  PluginContext,
+  PluginContextTrace,
+  PluginDescriptor,
+  TrackRef,
+} from '../../public';
+
+const PLUGIN_ID = 'dev.perfetto.RestorePinnedTrack';
+const SAVED_TRACKS_KEY = `${PLUGIN_ID}#savedPerfettoTracks`;
+
+/**
+ * Fuzzy save and restore of pinned tracks.
+ *
+ * Tries to persist pinned tracks. Uses full string matching between track name
+ * and group name. When no match is found for a saved track, it tries again
+ * without numbers.
+ */
+class RestorePinnedTrack implements Plugin {
+  onActivate(_ctx: PluginContext): void {}
+
+  private ctx!: PluginContextTrace;
+
+  async onTraceLoad(ctx: PluginContextTrace): Promise<void> {
+    this.ctx = ctx;
+    ctx.registerCommand({
+      id: `${PLUGIN_ID}#save`,
+      name: 'Save: Pinned tracks',
+      callback: () => {
+        this.saveTracks();
+      },
+    });
+    ctx.registerCommand({
+      id: `${PLUGIN_ID}#restore`,
+      name: 'Restore: Pinned tracks',
+      callback: () => {
+        this.restoreTracks();
+      },
+    });
+  }
+
+  private saveTracks() {
+    const pinnedTracks = this.ctx.timeline.tracks.filter(
+      (trackRef) => trackRef.isPinned,
+    );
+    const tracksToSave: SavedPinnedTrack[] = pinnedTracks.map((trackRef) => ({
+      groupName: trackRef.groupName,
+      trackName: trackRef.displayName,
+    }));
+    window.localStorage.setItem(SAVED_TRACKS_KEY, JSON.stringify(tracksToSave));
+  }
+
+  private restoreTracks() {
+    const savedTracks = window.localStorage.getItem(SAVED_TRACKS_KEY);
+    if (!savedTracks) {
+      alert('No saved tracks. Use the Save command first');
+      return;
+    }
+    const tracksToRestore: SavedPinnedTrack[] = JSON.parse(savedTracks);
+    const tracks: TrackRef[] = this.ctx.timeline.tracks;
+    tracksToRestore.forEach((trackToRestore) => {
+      // Check for an exact match
+      const exactMatch = tracks.find((track) => {
+        return (
+          track.key &&
+          trackToRestore.trackName === track.displayName &&
+          trackToRestore.groupName === track.groupName
+        );
+      });
+
+      if (exactMatch) {
+        this.ctx.timeline.pinTrack(exactMatch.key!);
+      } else {
+        // We attempt a match after removing numbers to potentially pin a
+        // "similar" track from a different trace. Removing numbers allows
+        // flexibility; for instance, with multiple 'sysui' processes (e.g.
+        // track group name: "com.android.systemui 123") without this approach,
+        // any could be mistakenly pinned. The goal is to restore specific
+        // tracks within the same trace, ensuring that a previously pinned track
+        // is pinned again.
+        // If the specific process with that PID is unavailable, pinning any
+        // other process matching the package name is attempted.
+        const fuzzyMatch = tracks.find((track) => {
+          return (
+            track.key &&
+            this.removeNumbers(trackToRestore.trackName) ===
+              this.removeNumbers(track.displayName) &&
+            this.removeNumbers(trackToRestore.groupName) ===
+              this.removeNumbers(track.groupName)
+          );
+        });
+
+        if (fuzzyMatch) {
+          this.ctx.timeline.pinTrack(fuzzyMatch.key!);
+        } else {
+          console.warn(
+            '[RestorePinnedTracks] No track found that matches',
+            trackToRestore,
+          );
+        }
+      }
+    });
+  }
+
+  private removeNumbers(inputString?: string): string | undefined {
+    return inputString?.replace(/\d+/g, '');
+  }
+}
+
+interface SavedPinnedTrack {
+  // Optional: group name for the track. Usually matches with process name.
+  groupName?: string;
+
+  // Track name to restore.
+  trackName: string;
+}
+
+export const plugin: PluginDescriptor = {
+  pluginId: PLUGIN_ID,
+  plugin: RestorePinnedTrack,
+};
diff --git a/ui/src/public/index.ts b/ui/src/public/index.ts
index ece6ea0..f3e4338 100644
--- a/ui/src/public/index.ts
+++ b/ui/src/public/index.ts
@@ -482,6 +482,12 @@
 
   // Optional: Add tracks to a group with this name.
   groupName?: string;
+
+  // Optional: Track key
+  key?: string;
+
+  // Optional: Whether the track is pinned
+  isPinned?: boolean;
 }
 
 // A predicate for selecting a subset of tracks.
diff --git a/ui/src/tracks/android_log/logs_panel.ts b/ui/src/tracks/android_log/logs_panel.ts
index 51e0889..3970190 100644
--- a/ui/src/tracks/android_log/logs_panel.ts
+++ b/ui/src/tracks/android_log/logs_panel.ts
@@ -18,12 +18,10 @@
 import {Actions} from '../../common/actions';
 import {raf} from '../../core/raf_scheduler';
 import {DetailsShell} from '../../widgets/details_shell';
-import {VirtualScrollContainer} from '../../widgets/virtual_scroll_container';
 
-import {SELECTED_LOG_ROWS_COLOR} from '../../frontend/css_constants';
 import {globals} from '../../frontend/globals';
 import {Timestamp} from '../../frontend/widgets/timestamp';
-import {createStore, EngineProxy, LONG, NUM, Store, STR} from '../../public';
+import {EngineProxy, LONG, NUM, NUM_NULL, Store, STR} from '../../public';
 import {Monitor} from '../../base/monitor';
 import {AsyncLimiter} from '../../base/async_limiter';
 import {escapeGlob, escapeQuery} from '../../trace_processor/query_utils';
@@ -31,6 +29,8 @@
 import {Button} from '../../widgets/button';
 import {TextInput} from '../../widgets/text_input';
 import {Intent} from '../../widgets/common';
+import {VirtualTable, VirtualTableRow} from '../../widgets/virtual_table';
+import {classNames} from '../../base/classnames';
 
 const ROW_H = 20;
 
@@ -63,15 +63,12 @@
 }
 
 export class LogPanel implements m.ClassComponent<LogPanelAttrs> {
-  private readonly SKIRT_SIZE = 50;
   private entries?: LogEntries;
-  private isStale = true;
-  private viewportBounds = {top: 0, bottom: 0};
 
-  private readonly paginationStore = createStore<Pagination>({
+  private pagination: Pagination = {
     offset: 0,
     count: 0,
-  });
+  };
   private readonly rowsMonitor: Monitor;
   private readonly filterMonitor: Monitor;
   private readonly queryLimiter = new AsyncLimiter();
@@ -81,7 +78,6 @@
       () => attrs.filterStore.state,
       () => globals.state.frontendLocalState.visibleState.start,
       () => globals.state.frontendLocalState.visibleState.end,
-      () => this.paginationStore.state,
     ]);
 
     this.filterMonitor = new Monitor([() => attrs.filterStore.state]);
@@ -89,148 +85,104 @@
 
   view({attrs}: m.CVnode<LogPanelAttrs>) {
     if (this.rowsMonitor.ifStateChanged()) {
-      this.queryLimiter.schedule(async () => {
-        this.isStale = true;
-        raf.scheduleFullRedraw();
-
-        const visibleState = globals.state.frontendLocalState.visibleState;
-        const visibleSpan = new TimeSpan(visibleState.start, visibleState.end);
-
-        if (this.filterMonitor.ifStateChanged()) {
-          await updateLogView(attrs.engine, attrs.filterStore.state);
-        }
-
-        this.entries = await updateLogEntries(
-          attrs.engine,
-          visibleSpan,
-          this.paginationStore.state,
-        );
-
-        raf.scheduleFullRedraw();
-        this.isStale = false;
-      });
+      this.reloadData(attrs);
     }
 
     const hasProcessNames =
       this.entries &&
       this.entries.processName.filter((name) => name).length > 0;
+    const totalEvents = this.entries?.totalEvents ?? 0;
 
-    const rows: m.Children = [];
-    rows.push(
-      m(
-        `.row`,
-        m('.cell.row-header', 'Timestamp'),
-        m('.cell.row-header', 'Level'),
-        m('.cell.row-header', 'Tag'),
-        hasProcessNames
-          ? m('.cell.with-process.row-header', 'Process name')
-          : undefined,
-        hasProcessNames
-          ? m('.cell.with-process.row-header', 'Message')
-          : m('.cell.no-process.row-header', 'Message'),
-        m('br'),
-      ),
-    );
-    if (this.entries) {
-      const offset = this.entries.offset;
-      const timestamps = this.entries.timestamps;
-      const priorities = this.entries.priorities;
-      const tags = this.entries.tags;
-      const messages = this.entries.messages;
-      const processNames = this.entries.processName;
-      const totalEvents = this.entries.totalEvents;
-
-      for (let i = 0; i < this.entries.timestamps.length; i++) {
-        const priorityLetter = LOG_PRIORITIES[priorities[i]][0];
-        const ts = timestamps[i];
-        const prioClass = priorityLetter || '';
-        const style: {top: string; backgroundColor?: string} = {
-          // 1.5 is for the width of the header
-          top: `${(offset + i + 1.5) * ROW_H}px`,
-        };
-        if (this.entries.isHighlighted[i]) {
-          style.backgroundColor = SELECTED_LOG_ROWS_COLOR;
-        }
-
-        rows.push(
-          m(
-            `.row.${prioClass}`,
-            {
-              class: this.isStale ? 'stale' : '',
-              style,
-              onmouseover: () => {
-                globals.dispatch(Actions.setHoverCursorTimestamp({ts}));
-              },
-              onmouseout: () => {
-                globals.dispatch(
-                  Actions.setHoverCursorTimestamp({ts: Time.INVALID}),
-                );
-              },
-            },
-            m('.cell', m(Timestamp, {ts})),
-            m('.cell', priorityLetter || '?'),
-            m('.cell', tags[i]),
-            hasProcessNames
-              ? m('.cell.with-process', processNames[i])
-              : undefined,
-            hasProcessNames
-              ? m('.cell.with-process', messages[i])
-              : m('.cell.no-process', messages[i]),
-            m('br'),
-          ),
-        );
-      }
-
-      return m(
-        DetailsShell,
-        {
-          title: 'Android Logs',
-          description: `[${this.viewportBounds.top}, ${this.viewportBounds.bottom}] / ${totalEvents}`,
-          buttons: m(LogsFilters, {store: attrs.filterStore}),
+    return m(
+      DetailsShell,
+      {
+        title: 'Android Logs',
+        description: `Total messages: ${totalEvents}`,
+        buttons: m(LogsFilters, {store: attrs.filterStore}),
+      },
+      m(VirtualTable, {
+        className: 'pf-android-logs-table',
+        columns: [
+          {header: 'Timestamp', width: '7rem'},
+          {header: 'Level', width: '4rem'},
+          {header: 'Tag', width: '13rem'},
+          ...(hasProcessNames ? [{header: 'Process', width: '18rem'}] : []),
+          {header: 'Message', width: '42rem'},
+        ],
+        rows: this.renderRows(hasProcessNames),
+        firstRowOffset: this.entries?.offset ?? 0,
+        numRows: this.entries?.totalEvents ?? 0,
+        rowHeight: ROW_H,
+        onReload: (offset, count) => {
+          this.pagination = {offset, count};
+          this.reloadData(attrs);
         },
-        m(
-          VirtualScrollContainer,
-          {
-            onScroll: (scrollContainer: HTMLElement) => {
-              this.recomputeVisibleRowsAndUpdate(scrollContainer);
-              raf.scheduleFullRedraw();
-            },
-          },
-          m(
-            '.log-panel',
-            m('.rows', {style: {height: `${totalEvents * ROW_H}px`}}, rows),
-          ),
-        ),
-      );
-    }
-
-    return null;
+        onRowHover: (id) => {
+          const timestamp = this.entries?.timestamps[id];
+          if (timestamp !== undefined) {
+            globals.dispatch(Actions.setHoverCursorTimestamp({ts: timestamp}));
+          }
+        },
+        onRowOut: () => {
+          globals.dispatch(Actions.setHoverCursorTimestamp({ts: Time.INVALID}));
+        },
+      }),
+    );
   }
 
-  recomputeVisibleRowsAndUpdate(scrollContainer: HTMLElement) {
-    const viewportTop = Math.floor(scrollContainer.scrollTop / ROW_H);
-    const viewportHeight = Math.ceil(scrollContainer.clientHeight / ROW_H);
-    const viewportBottom = viewportTop + viewportHeight;
+  private reloadData(attrs: LogPanelAttrs) {
+    this.queryLimiter.schedule(async () => {
+      const visibleState = globals.state.frontendLocalState.visibleState;
+      const visibleSpan = new TimeSpan(visibleState.start, visibleState.end);
 
-    this.viewportBounds = {
-      top: viewportTop,
-      bottom: viewportBottom,
-    };
+      if (this.filterMonitor.ifStateChanged()) {
+        await updateLogView(attrs.engine, attrs.filterStore.state);
+      }
 
-    const curPage = this.paginationStore.state;
+      this.entries = await updateLogEntries(
+        attrs.engine,
+        visibleSpan,
+        this.pagination,
+      );
 
-    if (
-      viewportTop < curPage.offset ||
-      viewportBottom >= curPage.offset + curPage.count
-    ) {
-      this.paginationStore.edit((draft) => {
-        const offset = Math.max(0, viewportTop - this.SKIRT_SIZE);
-        // Make it even so alternating coloured rows line up
-        const offsetEven = Math.floor(offset / 2) * 2;
-        draft.offset = offsetEven;
-        draft.count = viewportHeight + this.SKIRT_SIZE * 2;
+      raf.scheduleFullRedraw();
+    });
+  }
+
+  private renderRows(hasProcessNames: boolean | undefined): VirtualTableRow[] {
+    if (!this.entries) {
+      return [];
+    }
+
+    const timestamps = this.entries.timestamps;
+    const priorities = this.entries.priorities;
+    const tags = this.entries.tags;
+    const messages = this.entries.messages;
+    const processNames = this.entries.processName;
+
+    const rows: VirtualTableRow[] = [];
+    for (let i = 0; i < this.entries.timestamps.length; i++) {
+      const priorityLetter = LOG_PRIORITIES[priorities[i]][0];
+      const ts = timestamps[i];
+      const prioClass = priorityLetter || '';
+
+      rows.push({
+        id: i,
+        className: classNames(
+          prioClass,
+          this.entries.isHighlighted[i] && 'pf-highlighted',
+        ),
+        cells: [
+          m(Timestamp, {ts}),
+          priorityLetter || '?',
+          tags[i],
+          ...(hasProcessNames ? [processNames[i]] : []),
+          messages[i],
+        ],
       });
     }
+
+    return rows;
   }
 }
 
@@ -460,7 +412,7 @@
     prio: NUM,
     tag: STR,
     msg: STR,
-    isMsgHighlighted: NUM,
+    isMsgHighlighted: NUM_NULL,
     isProcessHighlighted: NUM,
     processName: STR,
   });
diff --git a/ui/src/tracks/cpu_freq/index.ts b/ui/src/tracks/cpu_freq/index.ts
index 92e30f9..b5fa5fa 100644
--- a/ui/src/tracks/cpu_freq/index.ts
+++ b/ui/src/tracks/cpu_freq/index.ts
@@ -31,6 +31,7 @@
   Track,
 } from '../../public';
 import {LONG, NUM, NUM_NULL} from '../../trace_processor/query_result';
+import {uuidv4Sql} from '../../base/uuid';
 
 export const CPU_FREQ_TRACK_KIND = 'CpuFreqTrack';
 
@@ -63,30 +64,29 @@
 
   private engine: EngineProxy;
   private config: Config;
-  private trackKey: string;
+  private trackUuid = uuidv4Sql();
 
-  constructor(config: Config, engine: EngineProxy, trackKey: string) {
+  constructor(config: Config, engine: EngineProxy) {
     this.config = config;
     this.engine = engine;
-    this.trackKey = trackKey.split('-').join('_');
   }
 
   async onCreate() {
     if (this.config.idleTrackId === undefined) {
       await this.engine.execute(`
-        create view raw_freq_idle_${this.trackKey} as
+        create view raw_freq_idle_${this.trackUuid} as
         select ts, dur, value as freqValue, -1 as idleValue
         from experimental_counter_dur c
         where track_id = ${this.config.freqTrackId}
       `);
     } else {
       await this.engine.execute(`
-        create view raw_freq_${this.trackKey} as
+        create view raw_freq_${this.trackUuid} as
         select ts, dur, value as freqValue
         from experimental_counter_dur c
         where track_id = ${this.config.freqTrackId};
 
-        create view raw_idle_${this.trackKey} as
+        create view raw_idle_${this.trackUuid} as
         select
           ts,
           dur,
@@ -94,22 +94,22 @@
         from experimental_counter_dur c
         where track_id = ${this.config.idleTrackId};
 
-        create virtual table raw_freq_idle_${this.trackKey}
-        using span_join(raw_freq_${this.trackKey}, raw_idle_${this.trackKey});
+        create virtual table raw_freq_idle_${this.trackUuid}
+        using span_join(raw_freq_${this.trackUuid}, raw_idle_${this.trackUuid});
       `);
     }
 
     await this.engine.execute(`
-      create virtual table cpu_freq_${this.trackKey}
+      create virtual table cpu_freq_${this.trackUuid}
       using __intrinsic_counter_mipmap((
         select ts, freqValue as value
-        from raw_freq_idle_${this.trackKey}
+        from raw_freq_idle_${this.trackUuid}
       ));
 
-      create virtual table cpu_idle_${this.trackKey}
+      create virtual table cpu_idle_${this.trackUuid}
       using __intrinsic_counter_mipmap((
         select ts, idleValue as value
-        from raw_freq_idle_${this.trackKey}
+        from raw_freq_idle_${this.trackUuid}
       ));
     `);
   }
@@ -120,11 +120,11 @@
 
   async onDestroy(): Promise<void> {
     if (this.engine.isAlive) {
-      await this.engine.query(`drop table cpu_freq_${this.trackKey}`);
-      await this.engine.query(`drop table cpu_idle_${this.trackKey}`);
-      await this.engine.query(`drop table raw_freq_idle_${this.trackKey}`);
-      await this.engine.query(`drop view if exists raw_freq_${this.trackKey}`);
-      await this.engine.query(`drop view if exists raw_idle_${this.trackKey}`);
+      await this.engine.query(`drop table cpu_freq_${this.trackUuid}`);
+      await this.engine.query(`drop table cpu_idle_${this.trackUuid}`);
+      await this.engine.query(`drop table raw_freq_idle_${this.trackUuid}`);
+      await this.engine.query(`drop view if exists raw_freq_${this.trackUuid}`);
+      await this.engine.query(`drop view if exists raw_idle_${this.trackUuid}`);
     }
   }
 
@@ -143,7 +143,7 @@
         max_value as maxFreq,
         last_ts as ts,
         last_value as lastFreq
-      FROM cpu_freq_${this.trackKey}(
+      FROM cpu_freq_${this.trackUuid}(
         ${start},
         ${end},
         ${resolution}
@@ -151,7 +151,7 @@
     `);
     const idleResult = await this.engine.query(`
       SELECT last_value as lastIdle
-      FROM cpu_idle_${this.trackKey}(
+      FROM cpu_idle_${this.trackUuid}(
         ${start},
         ${end},
         ${resolution}
@@ -450,7 +450,7 @@
           displayName: `Cpu ${cpu} Frequency`,
           kind: CPU_FREQ_TRACK_KIND,
           cpu,
-          trackFactory: (c) => new CpuFreqTrack(config, ctx.engine, c.trackKey),
+          trackFactory: () => new CpuFreqTrack(config, ctx.engine),
         });
       }
     }
diff --git a/ui/src/tracks/cpu_slices/index.ts b/ui/src/tracks/cpu_slices/index.ts
index 552092c..5c042dc 100644
--- a/ui/src/tracks/cpu_slices/index.ts
+++ b/ui/src/tracks/cpu_slices/index.ts
@@ -40,6 +40,7 @@
   Track,
 } from '../../public';
 import {LONG, NUM, STR_NULL} from '../../trace_processor/query_result';
+import {uuidv4Sql} from '../../base/uuid';
 
 export const CPU_SLICE_TRACK_KIND = 'CpuSliceTrack';
 
@@ -69,6 +70,7 @@
   private engine: EngineProxy;
   private cpu: number;
   private trackKey: string;
+  private trackUuid = uuidv4Sql();
 
   constructor(engine: EngineProxy, trackKey: string, cpu: number) {
     this.engine = engine;
@@ -78,7 +80,7 @@
 
   async onCreate() {
     await this.engine.query(`
-      create virtual table cpu_slice_${this.trackKey}
+      create virtual table cpu_slice_${this.trackUuid}
       using __intrinsic_slice_mipmap((
         select
           id,
@@ -116,7 +118,7 @@
         s.id,
         s.dur = -1 as isIncomplete,
         ifnull(s.priority < 100, 0) as isRealtime
-      from cpu_slice_${this.trackKey}(${start}, ${end}, ${resolution}) z
+      from cpu_slice_${this.trackUuid}(${start}, ${end}, ${resolution}) z
       cross join sched s using (id)
     `);
 
@@ -165,7 +167,7 @@
   async onDestroy() {
     if (this.engine.isAlive) {
       await this.engine.query(
-        `drop table if exists cpu_slice_${this.trackKey}`,
+        `drop table if exists cpu_slice_${this.trackUuid}`,
       );
     }
     this.fetcher.dispose();
diff --git a/ui/src/tracks/ftrace/ftrace_explorer.ts b/ui/src/tracks/ftrace/ftrace_explorer.ts
index 9481b86..5e7eb32 100644
--- a/ui/src/tracks/ftrace/ftrace_explorer.ts
+++ b/ui/src/tracks/ftrace/ftrace_explorer.ts
@@ -24,27 +24,18 @@
   PopupMultiSelect,
 } from '../../widgets/multiselect';
 import {PopupPosition} from '../../widgets/popup';
-import {VirtualScrollContainer} from '../../widgets/virtual_scroll_container';
 
 import {globals} from '../../frontend/globals';
 import {Timestamp} from '../../frontend/widgets/timestamp';
 import {FtraceFilter, FtraceStat} from './common';
-import {
-  createStore,
-  EngineProxy,
-  LONG,
-  NUM,
-  Store,
-  STR,
-  STR_NULL,
-} from '../../public';
+import {EngineProxy, LONG, NUM, Store, STR, STR_NULL} from '../../public';
 import {raf} from '../../core/raf_scheduler';
 import {AsyncLimiter} from '../../base/async_limiter';
 import {Monitor} from '../../base/monitor';
 import {Button} from '../../widgets/button';
+import {VirtualTable, VirtualTableRow} from '../../widgets/virtual_table';
 
 const ROW_H = 20;
-const PAGE_SIZE = 250;
 
 interface FtraceExplorerAttrs {
   cache: FtraceExplorerCache;
@@ -69,8 +60,8 @@
 }
 
 interface Pagination {
-  page: number;
-  pageCount: number;
+  offset: number;
+  count: number;
 }
 
 export interface FtraceExplorerCache {
@@ -104,10 +95,10 @@
 }
 
 export class FtraceExplorer implements m.ClassComponent<FtraceExplorerAttrs> {
-  private readonly paginationStore = createStore<Pagination>({
-    page: 0,
-    pageCount: 0,
-  });
+  private pagination: Pagination = {
+    offset: 0,
+    count: 0,
+  };
   private readonly monitor: Monitor;
   private readonly queryLimiter = new AsyncLimiter();
 
@@ -119,7 +110,6 @@
       () => globals.state.frontendLocalState.visibleState.start,
       () => globals.state.frontendLocalState.visibleState.end,
       () => attrs.filterStore.state,
-      () => this.paginationStore.state,
     ]);
 
     if (attrs.cache.state === 'blank') {
@@ -136,60 +126,85 @@
   }
 
   view({attrs}: m.CVnode<FtraceExplorerAttrs>) {
-    this.monitor.ifStateChanged(() =>
-      this.queryLimiter.schedule(async () => {
-        this.data = await lookupFtraceEvents(
-          attrs.engine,
-          this.paginationStore.state.page * PAGE_SIZE,
-          this.paginationStore.state.pageCount * PAGE_SIZE,
-          attrs.filterStore.state,
-        );
-        raf.scheduleFullRedraw();
-      }),
-    );
+    this.monitor.ifStateChanged(() => {
+      this.reloadData(attrs);
+    });
 
     return m(
       DetailsShell,
       {
         title: this.renderTitle(),
         buttons: this.renderFilterPanel(attrs),
+        fillParent: true,
       },
-      m(
-        VirtualScrollContainer,
-        {
-          onScroll: this.onScroll.bind(this),
+      m(VirtualTable, {
+        className: 'pf-ftrace-explorer',
+        columns: [
+          {header: 'ID', width: '5em'},
+          {header: 'Timestamp', width: '13em'},
+          {header: 'Name', width: '24em'},
+          {header: 'CPU', width: '3em'},
+          {header: 'Process', width: '24em'},
+          {header: 'Args', width: '200em'},
+        ],
+        firstRowOffset: this.data?.offset ?? 0,
+        numRows: this.data?.numEvents ?? 0,
+        rowHeight: ROW_H,
+        rows: this.renderData(),
+        onReload: (offset, count) => {
+          this.pagination = {offset, count};
+          this.reloadData(attrs);
         },
-        m('.ftrace-panel', this.renderRows()),
-      ),
+        onRowHover: this.onRowOver.bind(this),
+        onRowOut: this.onRowOut.bind(this),
+      }),
     );
   }
 
-  onScroll(scrollContainer: HTMLElement) {
-    const paginationState = this.paginationStore.state;
-    const prevPage = paginationState.page;
-    const prevPageCount = paginationState.pageCount;
-
-    const visibleRowOffset = Math.floor(scrollContainer.scrollTop / ROW_H);
-    const visibleRowCount = Math.ceil(scrollContainer.clientHeight / ROW_H);
-
-    // Work out which "page" we're on
-    const page = Math.max(0, Math.floor(visibleRowOffset / PAGE_SIZE) - 1);
-    const pageCount = Math.ceil(visibleRowCount / PAGE_SIZE) + 2;
-
-    if (page !== prevPage || pageCount !== prevPageCount) {
-      this.paginationStore.edit((draft) => {
-        draft.page = page;
-        draft.pageCount = pageCount;
-      });
+  private reloadData(attrs: FtraceExplorerAttrs): void {
+    this.queryLimiter.schedule(async () => {
+      this.data = await lookupFtraceEvents(
+        attrs.engine,
+        this.pagination.offset,
+        this.pagination.count,
+        attrs.filterStore.state,
+      );
       raf.scheduleFullRedraw();
+    });
+  }
+
+  private renderData(): VirtualTableRow[] {
+    if (!this.data) {
+      return [];
+    }
+
+    return this.data.events.map((event) => {
+      const {ts, name, cpu, process, args, id} = event;
+      const timestamp = m(Timestamp, {ts});
+      const color = colorForFtrace(name).base.cssString;
+
+      return {
+        id,
+        cells: [
+          id,
+          timestamp,
+          m('', m('span.colour', {style: {background: color}}), name),
+          cpu,
+          process,
+          args,
+        ],
+      };
+    });
+  }
+
+  private onRowOver(id: number) {
+    const event = this.data?.events.find((event) => event.id === id);
+    if (event) {
+      globals.dispatch(Actions.setHoverCursorTimestamp({ts: event.ts}));
     }
   }
 
-  onRowOver(ts: time) {
-    globals.dispatch(Actions.setHoverCursorTimestamp({ts}));
-  }
-
-  onRowOut() {
+  private onRowOut() {
     globals.dispatch(Actions.setHoverCursorTimestamp({ts: Time.INVALID}));
   }
 
@@ -242,55 +257,6 @@
       },
     });
   }
-
-  // Render all the rows including the first title row
-  private renderRows() {
-    const data = this.data;
-    const rows: m.Children = [];
-
-    rows.push(
-      m(
-        `.row`,
-        m('.cell.row-header', 'Timestamp'),
-        m('.cell.row-header', 'Name'),
-        m('.cell.row-header', 'CPU'),
-        m('.cell.row-header', 'Process'),
-        m('.cell.row-header', 'Args'),
-      ),
-    );
-
-    if (data) {
-      const {events, offset, numEvents} = data;
-      for (let i = 0; i < events.length; i++) {
-        const {ts, name, cpu, process, args} = events[i];
-
-        const timestamp = m(Timestamp, {ts});
-
-        const rank = i + offset;
-
-        const color = colorForFtrace(name).base.cssString;
-
-        rows.push(
-          m(
-            `.row`,
-            {
-              style: {top: `${(rank + 1.0) * ROW_H}px`},
-              onmouseover: this.onRowOver.bind(this, ts),
-              onmouseout: this.onRowOut.bind(this),
-            },
-            m('.cell', timestamp),
-            m('.cell', m('span.colour', {style: {background: color}}), name),
-            m('.cell', cpu),
-            m('.cell', process),
-            m('.cell', args),
-          ),
-        );
-      }
-      return m('.rows', {style: {height: `${numEvents * ROW_H}px`}}, rows);
-    } else {
-      return m('.rows', rows);
-    }
-  }
 }
 
 async function lookupFtraceEvents(
diff --git a/ui/src/tracks/process_summary/index.ts b/ui/src/tracks/process_summary/index.ts
index 37ac68f..ea6845f 100644
--- a/ui/src/tracks/process_summary/index.ts
+++ b/ui/src/tracks/process_summary/index.ts
@@ -103,8 +103,8 @@
           tags: {
             isDebuggable,
           },
-          trackFactory: ({trackKey}) => {
-            return new ProcessSchedulingTrack(ctx.engine, trackKey, config);
+          trackFactory: () => {
+            return new ProcessSchedulingTrack(ctx.engine, config);
           },
         });
       } else {
diff --git a/ui/src/tracks/process_summary/process_scheduling_track.ts b/ui/src/tracks/process_summary/process_scheduling_track.ts
index 5905de2..9725c29 100644
--- a/ui/src/tracks/process_summary/process_scheduling_track.ts
+++ b/ui/src/tracks/process_summary/process_scheduling_track.ts
@@ -27,6 +27,7 @@
 import {PanelSize} from '../../frontend/panel';
 import {EngineProxy, Track} from '../../public';
 import {LONG, NUM, QueryResult} from '../../trace_processor/query_result';
+import {uuidv4Sql} from '../../base/uuid';
 
 export const PROCESS_SCHEDULING_TRACK_KIND = 'ProcessSchedulingTrack';
 
@@ -57,13 +58,12 @@
   private fetcher = new TimelineFetcher(this.onBoundsChange.bind(this));
   private maxCpu = 0;
   private engine: EngineProxy;
-  private trackKey: string;
+  private trackUuid = uuidv4Sql();
   private config: Config;
 
-  constructor(engine: EngineProxy, trackKey: string, config: Config) {
+  constructor(engine: EngineProxy, config: Config) {
     this.engine = engine;
     this.config = config;
-    this.trackKey = trackKey.split('-').join('_');
   }
 
   async onCreate(): Promise<void> {
@@ -75,7 +75,7 @@
 
     if (this.config.upid !== null) {
       await this.engine.query(`
-        create virtual table process_scheduling_${this.trackKey}
+        create virtual table process_scheduling_${this.trackUuid}
         using __intrinsic_slice_mipmap((
           select
             id,
@@ -95,7 +95,7 @@
     } else {
       assertExists(this.config.utid);
       await this.engine.query(`
-        create virtual table process_scheduling_${this.trackKey}
+        create virtual table process_scheduling_${this.trackUuid}
         using __intrinsic_slice_mipmap((
           select
             id,
@@ -121,7 +121,7 @@
     this.fetcher.dispose();
     if (this.engine.isAlive) {
       await this.engine.query(`
-        drop table process_scheduling_${this.trackKey}
+        drop table process_scheduling_${this.trackUuid}
       `);
     }
   }
@@ -182,7 +182,7 @@
         s.id,
         z.depth as cpu,
         utid
-      from process_scheduling_${this.trackKey}(
+      from process_scheduling_${this.trackUuid}(
         ${start}, ${end}, ${bucketSize}
       ) z
       cross join sched s using (id)
diff --git a/ui/src/widgets/virtual_scroll_container.ts b/ui/src/widgets/virtual_scroll_container.ts
deleted file mode 100644
index f6bd052..0000000
--- a/ui/src/widgets/virtual_scroll_container.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2023 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import m from 'mithril';
-
-import {findRef, toHTMLElement} from '../base/dom_utils';
-
-interface VirtualScrollContainerAttrs {
-  // Called when the scrolling element is created, updates, or scrolls.
-  onScroll?: (dom: HTMLElement) => void;
-}
-
-export class VirtualScrollContainer
-  implements m.ClassComponent<VirtualScrollContainerAttrs>
-{
-  private readonly REF = 'virtual-scroll-container';
-  view({attrs, children}: m.Vnode<VirtualScrollContainerAttrs>) {
-    const {onScroll = () => {}} = attrs;
-
-    return m(
-      '.pf-virtual-scroll-container',
-      {
-        ref: this.REF,
-        onscroll: (e: Event) => onScroll(e.target as HTMLElement),
-      },
-      children,
-    );
-  }
-
-  oncreate({dom, attrs}: m.VnodeDOM<VirtualScrollContainerAttrs, this>) {
-    const {onScroll = () => {}} = attrs;
-
-    const element = findRef(dom, this.REF);
-    if (element) {
-      onScroll(toHTMLElement(element));
-    }
-  }
-}
diff --git a/ui/src/widgets/virtual_scroll_helper.ts b/ui/src/widgets/virtual_scroll_helper.ts
new file mode 100644
index 0000000..4fbe5c1
--- /dev/null
+++ b/ui/src/widgets/virtual_scroll_helper.ts
@@ -0,0 +1,150 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {Trash} from '../base/disposable';
+import * as Geometry from '../base/geom';
+
+export interface VirtualScrollHelperOpts {
+  overdrawPx: number;
+
+  // How close we can get to undrawn regions before updating
+  tolerancePx: number;
+
+  callback: (r: Geometry.Rect) => void;
+}
+
+export interface Data {
+  opts: VirtualScrollHelperOpts;
+  rect?: Geometry.Rect;
+}
+
+export class VirtualScrollHelper {
+  private readonly _trash = new Trash();
+  private readonly _data: Data[] = [];
+
+  constructor(
+    sliderElement: HTMLElement,
+    containerElement: Element,
+    opts: VirtualScrollHelperOpts[] = [],
+  ) {
+    this._data = opts.map((opts) => {
+      return {opts};
+    });
+
+    const recalculateRects = () => {
+      this._data.forEach((data) =>
+        recalculatePuckRect(sliderElement, containerElement, data),
+      );
+    };
+
+    containerElement.addEventListener('scroll', recalculateRects, {
+      passive: true,
+    });
+    this._trash.addCallback(() =>
+      containerElement.removeEventListener('scroll', recalculateRects),
+    );
+
+    // Resize observer callbacks are called once immediately
+    const resizeObserver = new ResizeObserver(() => {
+      recalculateRects();
+    });
+
+    resizeObserver.observe(containerElement);
+    resizeObserver.observe(sliderElement);
+    this._trash.addCallback(() => {
+      resizeObserver.disconnect();
+    });
+  }
+
+  dispose() {
+    this._trash.dispose();
+  }
+}
+
+function recalculatePuckRect(
+  sliderElement: HTMLElement,
+  containerElement: Element,
+  data: Data,
+): void {
+  const {tolerancePx, overdrawPx, callback} = data.opts;
+  if (!data.rect) {
+    const targetPuckRect = getTargetPuckRect(
+      sliderElement,
+      containerElement,
+      overdrawPx,
+    );
+    callback(targetPuckRect);
+    data.rect = targetPuckRect;
+  } else {
+    const viewportRect = containerElement.getBoundingClientRect();
+
+    // Expand the viewportRect by the tolerance
+    const viewportExpandedRect = Geometry.expandRect(viewportRect, tolerancePx);
+
+    const sliderClientRect = sliderElement.getBoundingClientRect();
+    const viewportClamped = Geometry.intersectRects(
+      viewportExpandedRect,
+      sliderClientRect,
+    );
+
+    // Translate the puck rect into client space (currently in slider space)
+    const puckClientRect = Geometry.translateRect(data.rect, {
+      x: sliderClientRect.x,
+      y: sliderClientRect.y,
+    });
+
+    // Check if the tolerance rect entirely contains the expanded viewport rect
+    // If not, request an update
+    if (!Geometry.containsRect(puckClientRect, viewportClamped)) {
+      const targetPuckRect = getTargetPuckRect(
+        sliderElement,
+        containerElement,
+        overdrawPx,
+      );
+      callback(targetPuckRect);
+      data.rect = targetPuckRect;
+    }
+  }
+}
+
+// Returns what the puck rect should look like
+function getTargetPuckRect(
+  sliderElement: HTMLElement,
+  containerElement: Element,
+  overdrawPx: number,
+) {
+  const sliderElementRect = sliderElement.getBoundingClientRect();
+  const containerRect = containerElement.getBoundingClientRect();
+
+  // Calculate the intersection of the container's viewport and the target
+  const intersection = Geometry.intersectRects(
+    containerRect,
+    sliderElementRect,
+  );
+
+  // Pad the intersection by the overdraw amount
+  const intersectionExpanded = Geometry.expandRect(intersection, overdrawPx);
+
+  // Intersect with the original target rect unless we want to avoid resizes
+  const targetRect = Geometry.intersectRects(
+    intersectionExpanded,
+    sliderElementRect,
+  );
+
+  return Geometry.rebaseRect(
+    targetRect,
+    sliderElementRect.x,
+    sliderElementRect.y,
+  );
+}
diff --git a/ui/src/widgets/virtual_table.ts b/ui/src/widgets/virtual_table.ts
new file mode 100644
index 0000000..0b97b96
--- /dev/null
+++ b/ui/src/widgets/virtual_table.ts
@@ -0,0 +1,262 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import m from 'mithril';
+import {Trash} from '../base/disposable';
+import {findRef, toHTMLElement} from '../base/dom_utils';
+import {Rect} from '../base/geom';
+import {assertExists} from '../base/logging';
+import {Style} from './common';
+import {scheduleFullRedraw} from './raf';
+import {VirtualScrollHelper} from './virtual_scroll_helper';
+
+/**
+ * The |VirtualTable| widget can be useful when attempting to render a large
+ * amount of tabular data - i.e. dumping the entire contents of a database
+ * table.
+ *
+ * A naive approach would be to load the entire dataset from the table and
+ * render it into the DOM. However, this has a number of disadvantages:
+ * - The query could potentially be very slow on large enough datasets.
+ * - The amount of data pulled could be larger than the available memory.
+ * - Rendering thousands of DOM elements using Mithril can be slow.
+ * - Asking the browser to create and update thousands of elements on the DOM
+ *   can also be slow.
+ *
+ * This implementation takes advantage of the fact that computer monitors are
+ * only so tall, so most will only be able to display a small subset of rows at
+ * a given time, and the user will have to scroll to reveal more data.
+ *
+ * Thus, this widget operates in such a way as to only render the DOM elements
+ * that are visible within the given scrolling container's viewport. To avoid
+ * spamming render updates, we render a few more rows above and below the
+ * current viewport, and only trigger an update once the user scrolls too close
+ * to the edge of the rendered data. These margins and tolerances are
+ * configurable with the |renderOverdrawPx| and |renderTolerancePx| attributes.
+ *
+ * When it comes to loading data, it's often more performant to run fewer large
+ * queries compared to more frequent smaller queries. Running a new query every
+ * time we want to update the DOM is usually too frequent, and results in
+ * flickering as the data is usually not loaded at the time the relevant row
+ * scrolls into view.
+ *
+ * Thus, this implementation employs two sets of limits, one to refresh the DOM
+ * and one larger one to re-query the data. The latter may be configured using
+ * the |queryOverdrawPx| and |queryTolerancePx| attributes.
+ *
+ * The smaller DOM refreshes are handled internally, but the user is
+ * responsible for performing query updates. When new data is required, the
+ * |onReload| callback is called with the row offset and count.
+ *
+ * The data must be passed in the |data| attribute which contains the offset of
+ * the currently loaded data and a number of rows.
+ *
+ * Row and column content is flexible as m.Children are accepted and passed
+ * straight to mithril.
+ *
+ * The widget is quite opinionated in terms of its styling, but the entire
+ * widget and each row may be tweaked using |className| and |style| attributes
+ * which behave in the same way as they do on other Mithril components.
+ */
+
+// Attributes for |VirtualTable|; see the file-level comment for how the
+// render/query overdraw and tolerance values interact.
+export interface VirtualTableAttrs {
+  // A list of columns containing the header row content and column widths
+  columns: VirtualTableColumn[];
+
+  // Row height in px (each row must have the same height)
+  rowHeight: number;
+
+  // Offset of the first row
+  firstRowOffset: number;
+
+  // Total number of rows
+  numRows: number;
+
+  // The row data to render
+  rows: VirtualTableRow[];
+
+  // Optional: Called when we need to reload data
+  onReload?: (rowOffset: number, rowCount: number) => void;
+
+  // Additional class name applied to the table container element
+  className?: string;
+
+  // Additional styles applied to the table container element
+  style?: Style;
+
+  // Optional: Called when a row is hovered, passing the hovered row's id
+  onRowHover?: (id: number) => void;
+
+  // Optional: Called when a row is un-hovered, passing the un-hovered row's id
+  onRowOut?: (id: number) => void;
+
+  // Optional: Number of pixels equivalent of rows to overdraw above and below
+  // the viewport
+  // Defaults to a sensible value
+  renderOverdrawPx?: number;
+
+  // Optional: How close we can get to the edge before triggering a DOM redraw
+  // Defaults to a sensible value
+  renderTolerancePx?: number;
+
+  // Optional: Number of pixels equivalent of rows to query above and below the
+  // viewport
+  // Defaults to a sensible value
+  queryOverdrawPx?: number;
+
+  // Optional: How close we can get to the edge of the loaded data before we
+  // trigger another query
+  // Defaults to a sensible value
+  queryTolerancePx?: number;
+}
+
+// Describes a single column: its header content and its fixed CSS width,
+// which is applied to both the header cell and every data cell.
+export interface VirtualTableColumn {
+  // Content to render in the header row
+  header: m.Children;
+
+  // CSS width e.g. 12px, 4em, etc...
+  width: string;
+}
+
+// One row of loaded data; |cells| must have one entry per column.
+export interface VirtualTableRow {
+  // Id for this row (must be unique within this dataset)
+  // Used for callbacks and as a Mithril key.
+  id: number;
+
+  // Data for each column in this row - must match number of elements in columns
+  cells: m.Children[];
+
+  // Optional: Additional class name applied to the row element
+  className?: string;
+}
+
+/**
+ * Scrolling table widget that only renders the rows visible within (and a
+ * little beyond) the scrolling container's viewport. See the file-level
+ * comment above for the full description of the two-tier render/query
+ * overdraw strategy.
+ */
+export class VirtualTable implements m.ClassComponent<VirtualTableAttrs> {
+  private readonly CONTAINER_REF = 'CONTAINER';
+  private readonly SLIDER_REF = 'SLIDER';
+  private readonly trash = new Trash();
+  // Half-open range [rowStart, rowEnd) of rows currently rendered into the
+  // DOM; updated by the render callback registered in oncreate().
+  private renderBounds = {rowStart: 0, rowEnd: 0};
+
+  view({attrs}: m.Vnode<VirtualTableAttrs>): m.Children {
+    const {columns, className, numRows, rowHeight, style} = attrs;
+    return m(
+      '.pf-vtable',
+      {className, style, ref: this.CONTAINER_REF},
+      m(
+        '.pf-vtable-content',
+        m(
+          '.pf-vtable-header',
+          columns.map((col) =>
+            m('.pf-vtable-data', {style: {width: col.width}}, col.header),
+          ),
+        ),
+        // The slider is given the full height of the dataset so the
+        // container's scrollbar behaves as if every row were rendered.
+        m(
+          '.pf-vtable-slider',
+          {ref: this.SLIDER_REF, style: {height: `${rowHeight * numRows}px`}},
+          // The puck holds only the rendered rows and is translated down to
+          // the position of the first rendered row.
+          m(
+            '.pf-vtable-puck',
+            {
+              style: {
+                transform: `translateY(${
+                  this.renderBounds.rowStart * rowHeight
+                }px)`,
+              },
+            },
+            this.renderContent(attrs),
+          ),
+        ),
+      ),
+    );
+  }
+
+  // Renders every row in the current renderBounds range.
+  private renderContent(attrs: VirtualTableAttrs): m.Children {
+    const rows: m.ChildArray = [];
+    for (
+      let i = this.renderBounds.rowStart;
+      i < this.renderBounds.rowEnd;
+      ++i
+    ) {
+      rows.push(this.renderRow(attrs, i));
+    }
+    return rows;
+  }
+
+  // Renders row |i| if its data is loaded, otherwise an empty placeholder of
+  // the same height so subsequent rows stay at the correct offset.
+  private renderRow(attrs: VirtualTableAttrs, i: number): m.Children {
+    const {rows, firstRowOffset, rowHeight, columns, onRowHover, onRowOut} =
+      attrs;
+    if (i >= firstRowOffset && i < firstRowOffset + rows.length) {
+      // Render the row...
+      const index = i - firstRowOffset;
+      const rowData = rows[index];
+      return m(
+        '.pf-vtable-row',
+        {
+          className: rowData.className,
+          style: {height: `${rowHeight}px`},
+          onmouseover: () => {
+            onRowHover?.(rowData.id);
+          },
+          onmouseout: () => {
+            onRowOut?.(rowData.id);
+          },
+        },
+        rowData.cells.map((data, colIndex) =>
+          m('.pf-vtable-data', {style: {width: columns[colIndex].width}}, data),
+        ),
+      );
+    } else {
+      // Render a placeholder div with the same height as a row but a
+      // transparent background
+      return m('', {style: {height: `${rowHeight}px`}});
+    }
+  }
+
+  oncreate({dom, attrs}: m.VnodeDOM<VirtualTableAttrs>) {
+    const {
+      renderOverdrawPx = 200,
+      renderTolerancePx = 100,
+      queryOverdrawPx = 10_000,
+      queryTolerancePx = 5_000,
+    } = attrs;
+
+    const sliderEl = toHTMLElement(assertExists(findRef(dom, this.SLIDER_REF)));
+    const containerEl = assertExists(findRef(dom, this.CONTAINER_REF));
+    const virtualScrollHelper = new VirtualScrollHelper(sliderEl, containerEl, [
+      // Inner (cheap) limit: refresh the DOM with a small overdraw.
+      {
+        overdrawPx: renderOverdrawPx,
+        tolerancePx: renderTolerancePx,
+        callback: ({top, bottom}: Rect) => {
+          const height = bottom - top;
+          // NOTE(review): start/count are rounded to even multiples of the
+          // row height — presumably to keep row parity stable (e.g. for
+          // nth-child striping); confirm against the stylesheet.
+          const rowStart = Math.floor(top / attrs.rowHeight / 2) * 2;
+          const rowCount = Math.ceil(height / attrs.rowHeight / 2) * 2;
+          this.renderBounds = {rowStart, rowEnd: rowStart + rowCount};
+          scheduleFullRedraw();
+        },
+      },
+      // Outer (expensive) limit: ask the user to reload data with a much
+      // larger overdraw, so queries are infrequent.
+      {
+        overdrawPx: queryOverdrawPx,
+        tolerancePx: queryTolerancePx,
+        callback: ({top, bottom}: Rect) => {
+          const rowStart = Math.floor(top / attrs.rowHeight / 2) * 2;
+          const rowEnd = Math.ceil(bottom / attrs.rowHeight);
+          attrs.onReload?.(rowStart, rowEnd - rowStart);
+        },
+      },
+    ]);
+    // Ensure the scroll helper's observers are torn down in onremove().
+    this.trash.add(virtualScrollHelper);
+  }
+
+  onremove(_: m.VnodeDOM<VirtualTableAttrs>) {
+    this.trash.dispose();
+  }
+}