Merge "Add metric and derived events for dropped frames"
diff --git a/Android.bp b/Android.bp
index 20b8ca9..f5484c8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -8137,6 +8137,7 @@
     srcs: [
         "src/trace_processor/sqlite/db_sqlite_table.cc",
         "src/trace_processor/sqlite/query_constraints.cc",
+        "src/trace_processor/sqlite/register_function.cc",
         "src/trace_processor/sqlite/span_join_operator_table.cc",
         "src/trace_processor/sqlite/sql_stats_table.cc",
         "src/trace_processor/sqlite/sqlite3_str_split.cc",
diff --git a/BUILD b/BUILD
index f37aaa5..6422ad6 100644
--- a/BUILD
+++ b/BUILD
@@ -1154,6 +1154,8 @@
         "src/trace_processor/sqlite/query_cache.h",
         "src/trace_processor/sqlite/query_constraints.cc",
         "src/trace_processor/sqlite/query_constraints.h",
+        "src/trace_processor/sqlite/register_function.cc",
+        "src/trace_processor/sqlite/register_function.h",
         "src/trace_processor/sqlite/scoped_db.h",
         "src/trace_processor/sqlite/span_join_operator_table.cc",
         "src/trace_processor/sqlite/span_join_operator_table.h",
@@ -4184,3 +4186,14 @@
     python_version = "PY3",
     legacy_create_init = 0,
 )
+
+perfetto_py_binary(
+    name = "batch_trace_processor_shell",
+    srcs = ["tools/batch_trace_processor/main.py"],
+    main = "tools/batch_trace_processor/main.py",
+    deps = [
+        ":trace_processor_py",
+    ] + PERFETTO_CONFIG.deps.pandas_py,
+    python_version = "PY3",
+    legacy_create_init = 0,
+)
diff --git a/BUILD.extras b/BUILD.extras
index 6b31fb2..e546216 100644
--- a/BUILD.extras
+++ b/BUILD.extras
@@ -141,3 +141,14 @@
     python_version = "PY3",
     legacy_create_init = 0,
 )
+
+perfetto_py_binary(
+    name = "batch_trace_processor_shell",
+    srcs = ["tools/batch_trace_processor/main.py"],
+    main = "tools/batch_trace_processor/main.py",
+    deps = [
+        ":trace_processor_py",
+    ] + PERFETTO_CONFIG.deps.pandas_py,
+    python_version = "PY3",
+    legacy_create_init = 0,
+)
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 6be8335..9c87072 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -75,6 +75,7 @@
   results += RunAndReportIfLong(CheckProtoEventList, input, output)
   results += RunAndReportIfLong(CheckBannedCpp, input, output)
   results += RunAndReportIfLong(CheckSqlMetrics, input, output)
+  results += RunAndReportIfLong(CheckTestData, input, output)
   return results
 
 
@@ -277,3 +278,15 @@
   if subprocess.call([tool]):
     return [output_api.PresubmitError(tool + ' failed')]
   return []
+
+
+def CheckTestData(input_api, output_api):
+  tool = 'tools/test_data'
+  if subprocess.call([tool, 'status', '--quiet']):
+    return [
+        output_api.PresubmitError(
+            '//test/data is out of sync. Run ' + tool + ' status for more.\n' +
+            'If you rebaselined UI tests or added a new test trace, run:\n' +
+            'tools/test_data upload')
+    ]
+  return []
diff --git a/docs/contributing/testing.md b/docs/contributing/testing.md
index 6eabc54..67b9407 100644
--- a/docs/contributing/testing.md
+++ b/docs/contributing/testing.md
@@ -142,7 +142,7 @@
 
 ```
 ui/run-integrationtests --rebaseline
-tools/add_test_data test/data/ui-screenshots
+tools/test_data upload
 ```
 
 Once finished you can commit and upload as part of your CL to cause the CI to
diff --git a/include/perfetto/ext/base/unix_socket.h b/include/perfetto/ext/base/unix_socket.h
index 0d679e5..15f2eaa 100644
--- a/include/perfetto/ext/base/unix_socket.h
+++ b/include/perfetto/ext/base/unix_socket.h
@@ -321,8 +321,8 @@
     return Send(msg, len, nullptr, 0);
   }
 
-  inline bool Send(const std::string& msg) {
-    return Send(msg.c_str(), msg.size() + 1, -1);
+  inline bool SendStr(const std::string& msg) {
+    return Send(msg.data(), msg.size(), -1);
   }
 
   // Returns the number of bytes (<= |len|) written in |msg| or 0 if there
diff --git a/protos/perfetto/config/perfetto_config.proto b/protos/perfetto/config/perfetto_config.proto
index 4261b7d..5c83bee 100644
--- a/protos/perfetto/config/perfetto_config.proto
+++ b/protos/perfetto/config/perfetto_config.proto
@@ -11,6 +11,8 @@
 
 package perfetto.protos;
 
+option go_package = "github.com/google/perfetto/perfetto_proto";
+
 // Begin of protos/perfetto/common/gpu_counter_descriptor.proto
 
 // Description of GPU counters.
diff --git a/protos/perfetto/metrics/perfetto_merged_metrics.proto b/protos/perfetto/metrics/perfetto_merged_metrics.proto
index ff89c7b..36e5073 100644
--- a/protos/perfetto/metrics/perfetto_merged_metrics.proto
+++ b/protos/perfetto/metrics/perfetto_merged_metrics.proto
@@ -11,6 +11,8 @@
 
 package perfetto.protos;
 
+option go_package = "github.com/google/perfetto/perfetto_proto";
+
 // Begin of protos/perfetto/metrics/android/batt_metric.proto
 
 message AndroidBatteryMetric {
diff --git a/protos/perfetto/trace/perfetto_trace.proto b/protos/perfetto/trace/perfetto_trace.proto
index d328ecb..27f8b91 100644
--- a/protos/perfetto/trace/perfetto_trace.proto
+++ b/protos/perfetto/trace/perfetto_trace.proto
@@ -11,6 +11,8 @@
 
 package perfetto.protos;
 
+option go_package = "github.com/google/perfetto/perfetto_proto";
+
 // Begin of protos/perfetto/common/gpu_counter_descriptor.proto
 
 // Description of GPU counters.
diff --git a/protos/third_party/chromium/chrome_track_event.proto b/protos/third_party/chromium/chrome_track_event.proto
index abf98e6..d856269 100644
--- a/protos/third_party/chromium/chrome_track_event.proto
+++ b/protos/third_party/chromium/chrome_track_event.proto
@@ -349,6 +349,8 @@
     WRITE_STATUS_WRITING_TO_TRACE = 3;
   }
   optional WriteStatus write_status = 2;
+
+  optional int32 sampled_thread_id = 3;
 }
 
 // Reports the latency caused by each breakdown in the
diff --git a/src/base/unix_socket_unittest.cc b/src/base/unix_socket_unittest.cc
index b16f3ff..86a6bb5 100644
--- a/src/base/unix_socket_unittest.cc
+++ b/src/base/unix_socket_unittest.cc
@@ -131,8 +131,8 @@
 
   // On Windows the first send immediately after the disconnection succeeds, the
   // kernel will detect the disconnection only later.
-  cli->Send(".");
-  EXPECT_FALSE(cli->Send("should_fail_both_on_win_and_unix"));
+  cli->SendStr(".");
+  EXPECT_FALSE(cli->SendStr("should_fail_both_on_win_and_unix"));
   task_runner_.RunUntilCheckpoint("cli_disconnected");
 }
 
@@ -177,8 +177,8 @@
         ASSERT_EQ("cli>srv", s->ReceiveString());
         srv_did_recv();
       }));
-  ASSERT_TRUE(cli->Send("cli>srv"));
-  ASSERT_TRUE(srv_conn->Send("srv>cli"));
+  ASSERT_TRUE(cli->SendStr("cli>srv"));
+  ASSERT_TRUE(srv_conn->SendStr("srv>cli"));
   task_runner_.RunUntilCheckpoint("cli_did_recv");
   task_runner_.RunUntilCheckpoint("srv_did_recv");
 
@@ -192,8 +192,8 @@
   ASSERT_EQ("", cli->ReceiveString());
   ASSERT_EQ(0u, srv_conn->Receive(&msg, sizeof(msg)));
   ASSERT_EQ("", srv_conn->ReceiveString());
-  ASSERT_FALSE(cli->Send("foo"));
-  ASSERT_FALSE(srv_conn->Send("bar"));
+  ASSERT_FALSE(cli->SendStr("foo"));
+  ASSERT_FALSE(srv_conn->SendStr("bar"));
   srv->Shutdown(true);
   task_runner_.RunUntilCheckpoint("cli_disconnected");
   task_runner_.RunUntilCheckpoint("srv_disconnected");
@@ -250,7 +250,7 @@
         EXPECT_CALL(event_listener_, OnDataAvailable(s))
             .WillOnce(Invoke([](UnixSocket* t) {
               ASSERT_EQ("PING", t->ReceiveString());
-              ASSERT_TRUE(t->Send("PONG"));
+              ASSERT_TRUE(t->SendStr("PONG"));
             }));
       }));
 
@@ -261,7 +261,7 @@
     EXPECT_CALL(event_listener_, OnConnect(cli[i].get(), true))
         .WillOnce(Invoke([](UnixSocket* s, bool success) {
           ASSERT_TRUE(success);
-          ASSERT_TRUE(s->Send("PING"));
+          ASSERT_TRUE(s->SendStr("PING"));
         }));
 
     auto checkpoint = task_runner_.CreateCheckpoint(std::to_string(i));
@@ -405,7 +405,7 @@
   task_runner_.RunUntilCheckpoint("cli_connected");
   srv->Shutdown(true);
 
-  cli->Send("test");
+  cli->SendStr("test");
 
   ASSERT_NE(peer, nullptr);
   auto raw_sock = peer->ReleaseSocket();
@@ -413,10 +413,10 @@
   EXPECT_CALL(event_listener_, OnDataAvailable(_)).Times(0);
   task_runner_.RunUntilIdle();
 
-  char buf[sizeof("test")];
+  char buf[5];
   ASSERT_TRUE(raw_sock);
-  ASSERT_EQ(raw_sock.Receive(buf, sizeof(buf)),
-            static_cast<ssize_t>(sizeof(buf)));
+  ASSERT_EQ(raw_sock.Receive(buf, sizeof(buf)), 4);
+  buf[sizeof(buf) - 1] = '\0';
   ASSERT_STREQ(buf, "test");
 }
 
@@ -445,7 +445,7 @@
             .WillRepeatedly(Invoke([](UnixSocket* cli_sock) {
               cli_sock->ReceiveString();  // Read connection EOF;
             }));
-        ASSERT_TRUE(s->Send("welcome"));
+        ASSERT_TRUE(s->SendStr("welcome"));
       }));
 
   for (size_t i = 0; i < kNumClients; i++) {
@@ -717,7 +717,7 @@
 
           // Now change the shared memory and ping the other process.
           memcpy(mem, "rock more", 10);
-          ASSERT_TRUE(s->Send("change notify"));
+          ASSERT_TRUE(s->SendStr("change notify"));
           checkpoint();
         }));
     task_runner_.RunUntilCheckpoint("change_seen_by_client");
diff --git a/src/profiling/common/unwind_support.cc b/src/profiling/common/unwind_support.cc
index 9b72d3e..433c052 100644
--- a/src/profiling/common/unwind_support.cc
+++ b/src/profiling/common/unwind_support.cc
@@ -69,7 +69,6 @@
 
   unwindstack::SharedString name("");
   std::shared_ptr<unwindstack::MapInfo> prev_map;
-  std::shared_ptr<unwindstack::MapInfo> prev_real_map;
   return android::procinfo::ReadMapFileContent(
       &content[0], [&](const android::procinfo::MapInfo& mapinfo) {
         // Mark a device map in /dev/ and not in /dev/ashmem/ specially.
@@ -83,12 +82,9 @@
           name = unwindstack::SharedString(mapinfo.name);
         }
         maps_.emplace_back(unwindstack::MapInfo::Create(
-            prev_map, prev_real_map, mapinfo.start, mapinfo.end, mapinfo.pgoff,
+            prev_map, mapinfo.start, mapinfo.end, mapinfo.pgoff,
             flags, name));
         prev_map = maps_.back();
-        if (!prev_map->IsBlank()) {
-          prev_real_map = prev_map;
-        }
       });
 }
 
diff --git a/src/trace_processor/importers/common/process_tracker.cc b/src/trace_processor/importers/common/process_tracker.cc
index 0981063..e9b4c9e 100644
--- a/src/trace_processor/importers/common/process_tracker.cc
+++ b/src/trace_processor/importers/common/process_tracker.cc
@@ -51,7 +51,7 @@
 
   auto* thread_table = context_->storage->mutable_thread_table();
   UniqueTid new_utid = thread_table->Insert(row).row;
-  tids_[tid].emplace_back(new_utid);
+  tids_[tid] = new_utid;
   PERFETTO_DCHECK(thread_name_priorities_.size() == new_utid);
   thread_name_priorities_.push_back(ThreadNamePriority::kOther);
   return new_utid;
@@ -78,8 +78,7 @@
 
   // Remove the thread from the list of threads being tracked as any event after
   // this one should be ignored.
-  auto& vector = tids_[tid];
-  vector.erase(std::remove(vector.begin(), vector.end(), utid));
+  tids_.erase(tid);
 
   auto opt_upid = thread_table->upid()[utid];
   if (!opt_upid.has_value() || process_table->pid()[*opt_upid] != tid)
@@ -169,34 +168,32 @@
   auto* threads = context_->storage->mutable_thread_table();
   auto* processes = context_->storage->mutable_process_table();
 
-  auto vector_it = tids_.find(tid);
-  if (vector_it == tids_.end())
+  auto it = tids_.find(tid);
+  if (it == tids_.end())
     return base::nullopt;
 
-  // Iterate backwards through the threads so ones later in the trace are more
-  // likely to be picked.
-  const auto& vector = vector_it->second;
-  for (auto it = vector.rbegin(); it != vector.rend(); it++) {
-    UniqueTid current_utid = *it;
+  UniqueTid current_utid = it->second;
 
-    // If we finished this thread, we should have removed it from the vector
-    // entirely.
-    PERFETTO_DCHECK(!threads->end_ts()[current_utid].has_value());
+  // If we finished this thread, we should have removed it from the tids map
+  // entirely.
+  PERFETTO_DCHECK(!threads->end_ts()[current_utid].has_value());
 
-    // If the thread is dead, ignore it.
-    if (!IsThreadAlive(current_utid))
-      continue;
-
-    // If we don't know the parent process, we have to choose this thread.
-    auto opt_current_upid = threads->upid()[current_utid];
-    if (!opt_current_upid)
-      return current_utid;
-
-    // We found a thread that matches both the tid and its parent pid.
-    uint32_t current_pid = processes->pid()[*opt_current_upid];
-    if (!pid || current_pid == *pid)
-      return current_utid;
+  // If the thread is dead, remove it from the map and return null.
+  if (!IsThreadAlive(current_utid)) {
+    tids_.erase(tid);
+    return base::nullopt;
   }
+
+  // If we don't know the parent process, we have to choose this thread.
+  auto opt_current_upid = threads->upid()[current_utid];
+  if (!opt_current_upid)
+    return current_utid;
+
+  // We found a thread that matches both the tid and its parent pid.
+  uint32_t current_pid = processes->pid()[*opt_current_upid];
+  if (!pid || current_pid == *pid)
+    return current_utid;
+
   return base::nullopt;
 }
 
@@ -459,7 +456,7 @@
 
 void ProcessTracker::SetPidZeroIgnoredForIdleProcess() {
   // Create a mapping from (t|p)id 0 -> u(t|p)id 0 for the idle process.
-  tids_.emplace(0, std::vector<UniqueTid>{0});
+  tids_.emplace(0, 0);
   pids_.emplace(0, 0);
 
   auto swapper_id = context_->storage->InternString("swapper");
diff --git a/src/trace_processor/importers/common/process_tracker.h b/src/trace_processor/importers/common/process_tracker.h
index 1a062cf..c857707 100644
--- a/src/trace_processor/importers/common/process_tracker.h
+++ b/src/trace_processor/importers/common/process_tracker.h
@@ -50,15 +50,6 @@
   ProcessTracker& operator=(const ProcessTracker&) = delete;
   virtual ~ProcessTracker();
 
-  using UniqueProcessIterator =
-      std::multimap<uint32_t, UniquePid>::const_iterator;
-  using UniqueProcessBounds =
-      std::pair<UniqueProcessIterator, UniqueProcessIterator>;
-
-  using UniqueThreadIterator = std::vector<UniqueTid>::const_iterator;
-  using UniqueThreadBounds =
-      std::pair<UniqueThreadIterator, UniqueThreadIterator>;
-
   // TODO(b/110409911): Invalidation of process and threads is yet to be
   // implemented. This will include passing timestamps into the below methods
   // to ensure the correct upid/utid is found.
@@ -137,15 +128,16 @@
 
   // Returns the bounds of a range that includes all UniquePids that have the
   // requested pid.
-  UniqueProcessBounds UpidsForPidForTesting(uint32_t pid) {
-    return pids_.equal_range(pid);
+  base::Optional<UniquePid> UpidForPidForTesting(uint32_t pid) {
+    auto it = pids_.find(pid);
+    return it == pids_.end() ? base::nullopt : base::make_optional(it->second);
   }
 
   // Returns the bounds of a range that includes all UniqueTids that have the
   // requested tid.
-  UniqueThreadBounds UtidsForTidForTesting(uint32_t tid) {
-    const auto& deque = tids_[tid];
-    return std::make_pair(deque.begin(), deque.end());
+  base::Optional<UniqueTid> UtidForTidForTesting(uint32_t tid) {
+    auto it = tids_.find(tid);
+    return it == tids_.end() ? base::nullopt : base::make_optional(it->second);
   }
 
   // Marks the two threads as belonging to the same process, even if we don't
@@ -187,13 +179,11 @@
 
   ArgsTracker args_tracker_;
 
-  // Each tid can have multiple UniqueTid entries, a new UniqueTid is assigned
-  // each time a thread is seen in the trace.
-  std::map<uint32_t /* tid */, std::vector<UniqueTid>> tids_;
+  // Keep the mapping of the most recently seen tid to the associated utid.
+  std::unordered_map<uint32_t /* tid */, UniqueTid> tids_;
 
-  // Each pid can have multiple UniquePid entries, a new UniquePid is assigned
-  // each time a process is seen in the trace.
-  std::map<uint32_t /* pid (aka tgid) */, UniquePid> pids_;
+  // Keep the mapping of the most recently seen pid to the associated upid.
+  std::unordered_map<uint32_t /* pid (aka tgid) */, UniquePid> pids_;
 
   // Pending thread associations. The meaning of a pair<ThreadA, ThreadB> in
   // this vector is: we know that A and B belong to the same process, but we
diff --git a/src/trace_processor/importers/common/process_tracker_unittest.cc b/src/trace_processor/importers/common/process_tracker_unittest.cc
index 7ed1bd4..7b32f2a 100644
--- a/src/trace_processor/importers/common/process_tracker_unittest.cc
+++ b/src/trace_processor/importers/common/process_tracker_unittest.cc
@@ -47,8 +47,8 @@
 TEST_F(ProcessTrackerTest, PushProcess) {
   context.process_tracker->SetProcessMetadata(1, base::nullopt, "test",
                                               base::StringView());
-  auto pair_it = context.process_tracker->UpidsForPidForTesting(1);
-  ASSERT_EQ(pair_it.first->second, 1u);
+  auto opt_upid = context.process_tracker->UpidForPidForTesting(1);
+  ASSERT_EQ(opt_upid.value_or(-1), 1u);
 }
 
 TEST_F(ProcessTrackerTest, GetOrCreateNewProcess) {
@@ -68,9 +68,8 @@
                                               base::StringView());
   context.process_tracker->SetProcessMetadata(1, base::nullopt, "test",
                                               base::StringView());
-  auto pair_it = context.process_tracker->UpidsForPidForTesting(1);
-  ASSERT_EQ(pair_it.first->second, 1u);
-  ASSERT_EQ(++pair_it.first, pair_it.second);
+  auto opt_upid = context.process_tracker->UpidForPidForTesting(1);
+  ASSERT_EQ(opt_upid.value_or(-1), 1u);
 }
 
 TEST_F(ProcessTrackerTest, PushTwoProcessEntries_DifferentPid) {
@@ -78,10 +77,10 @@
                                               base::StringView());
   context.process_tracker->SetProcessMetadata(3, base::nullopt, "test",
                                               base::StringView());
-  auto pair_it = context.process_tracker->UpidsForPidForTesting(1);
-  ASSERT_EQ(pair_it.first->second, 1u);
-  auto second_pair_it = context.process_tracker->UpidsForPidForTesting(3);
-  ASSERT_EQ(second_pair_it.first->second, 2u);
+  auto opt_upid = context.process_tracker->UpidForPidForTesting(1);
+  ASSERT_EQ(opt_upid.value_or(-1), 1u);
+  opt_upid = context.process_tracker->UpidForPidForTesting(3);
+  ASSERT_EQ(opt_upid.value_or(-1), 2u);
 }
 
 TEST_F(ProcessTrackerTest, AddProcessEntry_CorrectName) {
@@ -98,11 +97,11 @@
   // We expect 3 threads: Invalid thread, main thread for pid, tid 12.
   ASSERT_EQ(context.storage->thread_table().row_count(), 3u);
 
-  auto tid_it = context.process_tracker->UtidsForTidForTesting(12);
-  ASSERT_NE(tid_it.first, tid_it.second);
+  auto opt_upid = context.process_tracker->UtidForTidForTesting(12);
+  ASSERT_TRUE(opt_upid.has_value());
   ASSERT_EQ(context.storage->thread_table().upid()[1].value(), 1u);
-  auto pid_it = context.process_tracker->UpidsForPidForTesting(2);
-  ASSERT_NE(pid_it.first, pid_it.second);
+  opt_upid = context.process_tracker->UpidForPidForTesting(2);
+  ASSERT_TRUE(opt_upid.has_value());
   ASSERT_EQ(context.storage->process_table().row_count(), 2u);
 }
 
diff --git a/src/trace_processor/metrics/metrics.cc b/src/trace_processor/metrics/metrics.cc
index f0c706b..3bfff9b 100644
--- a/src/trace_processor/metrics/metrics.cc
+++ b/src/trace_processor/metrics/metrics.cc
@@ -20,10 +20,12 @@
 #include <unordered_map>
 #include <vector>
 
+#include "perfetto/base/status.h"
 #include "perfetto/ext/base/string_utils.h"
 #include "perfetto/ext/base/utils.h"
 #include "perfetto/protozero/scattered_heap_buffer.h"
 #include "src/trace_processor/metrics/sql_metrics.h"
+#include "src/trace_processor/sqlite/sqlite_utils.h"
 #include "src/trace_processor/tp_metatrace.h"
 #include "src/trace_processor/util/status_macros.h"
 
@@ -36,46 +38,6 @@
 
 namespace {
 
-SqlValue SqlValueFromSqliteValue(sqlite3_value* value) {
-  SqlValue sql_value;
-  switch (sqlite3_value_type(value)) {
-    case SQLITE_INTEGER:
-      sql_value.type = SqlValue::Type::kLong;
-      sql_value.long_value = sqlite3_value_int64(value);
-      break;
-    case SQLITE_FLOAT:
-      sql_value.type = SqlValue::Type::kDouble;
-      sql_value.double_value = sqlite3_value_double(value);
-      break;
-    case SQLITE_TEXT:
-      sql_value.type = SqlValue::Type::kString;
-      sql_value.string_value =
-          reinterpret_cast<const char*>(sqlite3_value_text(value));
-      break;
-    case SQLITE_BLOB:
-      sql_value.type = SqlValue::Type::kBytes;
-      sql_value.bytes_value = sqlite3_value_blob(value);
-      sql_value.bytes_count = static_cast<size_t>(sqlite3_value_bytes(value));
-      break;
-  }
-  return sql_value;
-}
-
-base::Optional<std::string> SqlValueToString(SqlValue value) {
-  switch (value.type) {
-    case SqlValue::Type::kString:
-      return value.AsString();
-    case SqlValue::Type::kDouble:
-      return std::to_string(value.AsDouble());
-    case SqlValue::Type::kLong:
-      return std::to_string(value.AsLong());
-    case SqlValue::Type::kBytes:
-    case SqlValue::Type::kNull:
-      return base::nullopt;
-  }
-  PERFETTO_FATAL("For GCC");
-}
-
 base::Status ValidateSingleNonEmptyMessage(const uint8_t* ptr,
                                            size_t size,
                                            uint32_t schema_type,
@@ -523,22 +485,24 @@
   return 0;
 }
 
-void NullIfEmpty(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
+base::Status NullIfEmpty::Run(void*,
+                              size_t argc,
+                              sqlite3_value** argv,
+                              SqlValue& out,
+                              Destructors&) {
   // SQLite should enforce this for us.
   PERFETTO_CHECK(argc == 1);
 
   if (sqlite3_value_type(argv[0]) != SQLITE_BLOB) {
-    sqlite3_result_error(
-        ctx, "NULL_IF_EMPTY: should only be called with bytes argument", -1);
-    return;
+    return base::ErrStatus(
+        "NULL_IF_EMPTY: should only be called with bytes argument");
   }
 
-  if (sqlite3_value_bytes(argv[0]) == 0) {
-    sqlite3_result_null(ctx);
-    return;
-  }
+  if (sqlite3_value_bytes(argv[0]) == 0)
+    return base::OkStatus();
 
-  sqlite3_result_value(ctx, argv[0]);
+  out = sqlite_utils::SqliteValueToSqlValue(argv[0]);
+  return base::OkStatus();
 }
 
 void RepeatedFieldStep(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
@@ -561,7 +525,7 @@
     *builder_ptr_ptr = new RepeatedFieldBuilder();
   }
 
-  auto value = SqlValueFromSqliteValue(argv[0]);
+  auto value = sqlite_utils::SqliteValueToSqlValue(argv[0]);
   RepeatedFieldBuilder* builder = *builder_ptr_ptr;
   auto status = builder->AddSqlValue(value);
   if (!status.ok()) {
@@ -604,31 +568,25 @@
 // as byte blobs (as they were built recursively using this function).
 // The return value is the built proto or an error about why the proto could
 // not be built.
-void BuildProto(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
-  const auto* fn_ctx =
-      static_cast<const BuildProtoContext*>(sqlite3_user_data(ctx));
+base::Status BuildProto::Run(BuildProto::Context* ctx,
+                             size_t argc,
+                             sqlite3_value** argv,
+                             SqlValue& out,
+                             Destructors& destructors) {
   if (argc % 2 != 0) {
-    base::Status error =
-        base::ErrStatus("Invalid number of args to %s BuildProto (got %d)",
-                        fn_ctx->desc->full_name().c_str(), argc);
-    sqlite3_result_error(ctx, error.c_message(), -1);
-    return;
+    return base::ErrStatus("Invalid number of args to %s BuildProto (got %zu)",
+                           ctx->desc->full_name().c_str(), argc);
   }
 
-  ProtoBuilder builder(fn_ctx->pool, fn_ctx->desc);
-  for (int i = 0; i < argc; i += 2) {
+  ProtoBuilder builder(ctx->pool, ctx->desc);
+  for (size_t i = 0; i < argc; i += 2) {
     if (sqlite3_value_type(argv[i]) != SQLITE_TEXT) {
-      sqlite3_result_error(ctx, "BuildProto: Invalid args", -1);
-      return;
+      return base::ErrStatus("BuildProto: Invalid args");
     }
 
     auto* key = reinterpret_cast<const char*>(sqlite3_value_text(argv[i]));
-    auto value = SqlValueFromSqliteValue(argv[i + 1]);
-    auto status = builder.AppendSqlValue(key, value);
-    if (!status.ok()) {
-      sqlite3_result_error(ctx, status.c_message(), -1);
-      return;
-    }
+    auto value = sqlite_utils::SqliteValueToSqlValue(argv[i + 1]);
+    RETURN_IF_ERROR(builder.AppendSqlValue(key, value));
   }
 
   // Even if the message is empty, we don't return null here as we want the
@@ -637,49 +595,49 @@
   if (raw.empty()) {
     // Passing nullptr to SQLite feels dangerous so just pass an empty string
     // and zero as the size so we don't deref nullptr accidentially somewhere.
-    sqlite3_result_blob(ctx, "", 0, nullptr);
-    return;
+    destructors.bytes_destructor = sqlite_utils::kSqliteStatic;
+    out = SqlValue::Bytes("", 0);
+    return base::OkStatus();
   }
 
   std::unique_ptr<uint8_t[], base::FreeDeleter> data(
       static_cast<uint8_t*>(malloc(raw.size())));
   memcpy(data.get(), raw.data(), raw.size());
-  sqlite3_result_blob(ctx, data.release(), static_cast<int>(raw.size()), free);
+
+  destructors.bytes_destructor = free;
+  out = SqlValue::Bytes(data.release(), raw.size());
+  return base::OkStatus();
 }
 
-void RunMetric(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
-  auto* fn_ctx = static_cast<RunMetricContext*>(sqlite3_user_data(ctx));
-  if (argc == 0 || sqlite3_value_type(argv[0]) != SQLITE_TEXT) {
-    sqlite3_result_error(ctx, "RUN_METRIC: Invalid arguments", -1);
-    return;
-  }
+base::Status RunMetric::Run(RunMetric::Context* ctx,
+                            size_t argc,
+                            sqlite3_value** argv,
+                            SqlValue&,
+                            Destructors&) {
+  if (argc == 0 || sqlite3_value_type(argv[0]) != SQLITE_TEXT)
+    return base::ErrStatus("RUN_METRIC: Invalid arguments");
 
   const char* path = reinterpret_cast<const char*>(sqlite3_value_text(argv[0]));
   auto metric_it = std::find_if(
-      fn_ctx->metrics->begin(), fn_ctx->metrics->end(),
+      ctx->metrics->begin(), ctx->metrics->end(),
       [path](const SqlMetricFile& metric) { return metric.path == path; });
-  if (metric_it == fn_ctx->metrics->end()) {
-    sqlite3_result_error(ctx, "RUN_METRIC: Unknown filename provided", -1);
-    return;
-  }
+  if (metric_it == ctx->metrics->end())
+    return base::ErrStatus("RUN_METRIC: Unknown filename provided");
   const auto& sql = metric_it->sql;
 
   std::unordered_map<std::string, std::string> substitutions;
-  for (int i = 1; i < argc; i += 2) {
-    if (sqlite3_value_type(argv[i]) != SQLITE_TEXT) {
-      sqlite3_result_error(ctx, "RUN_METRIC: all keys must be strings", -1);
-      return;
-    }
+  for (size_t i = 1; i < argc; i += 2) {
+    if (sqlite3_value_type(argv[i]) != SQLITE_TEXT)
+      return base::ErrStatus("RUN_METRIC: all keys must be strings");
 
-    base::Optional<std::string> key_str =
-        SqlValueToString(SqlValueFromSqliteValue(argv[i]));
-    base::Optional<std::string> value_str =
-        SqlValueToString(SqlValueFromSqliteValue(argv[i + 1]));
+    base::Optional<std::string> key_str = sqlite_utils::SqlValueToString(
+        sqlite_utils::SqliteValueToSqlValue(argv[i]));
+    base::Optional<std::string> value_str = sqlite_utils::SqlValueToString(
+        sqlite_utils::SqliteValueToSqlValue(argv[i + 1]));
 
     if (!value_str) {
-      sqlite3_result_error(
-          ctx, "RUN_METRIC: all values must be convertible to strings", -1);
-      return;
+      return base::ErrStatus(
+          "RUN_METRIC: all values must be convertible to strings");
     }
     substitutions[*key_str] = *value_str;
   }
@@ -692,57 +650,49 @@
     std::string buffer;
     int ret = TemplateReplace(trimmed, substitutions, &buffer);
     if (ret) {
-      char* error = sqlite3_mprintf(
+      return base::ErrStatus(
           "RUN_METRIC: Error when performing substitutions: %s", query.c_str());
-      sqlite3_result_error(ctx, error, -1);
-      return;
     }
 
     PERFETTO_DLOG("RUN_METRIC: Executing query: %s", buffer.c_str());
-    auto it = fn_ctx->tp->ExecuteQuery(buffer);
+    auto it = ctx->tp->ExecuteQuery(buffer);
     it.Next();
 
     base::Status status = it.Status();
     if (!status.ok()) {
-      char* error =
-          sqlite3_mprintf("RUN_METRIC: Error when running file %s: %s", path,
-                          status.c_message());
-      sqlite3_result_error(ctx, error, -1);
-      sqlite3_free(error);
-      return;
+      return base::ErrStatus("RUN_METRIC: Error when running file %s: %s", path,
+                             status.c_message());
     }
   }
-  sqlite3_result_null(ctx);
+  return base::OkStatus();
 }
 
-void UnwrapMetricProto(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
+base::Status UnwrapMetricProto::Run(Context*,
+                                    size_t argc,
+                                    sqlite3_value** argv,
+                                    SqlValue& out,
+                                    Destructors& destructors) {
   if (argc != 2) {
-    sqlite3_result_error(ctx,
-                         "UNWRAP_METRIC_PROTO: Expected exactly proto and "
-                         "message type as arguments",
-                         -1);
-    return;
+    return base::ErrStatus(
+        "UNWRAP_METRIC_PROTO: Expected exactly proto and message type as "
+        "arguments");
   }
 
-  SqlValue proto = SqlValueFromSqliteValue(argv[0]);
-  SqlValue message_type = SqlValueFromSqliteValue(argv[1]);
+  SqlValue proto = sqlite_utils::SqliteValueToSqlValue(argv[0]);
+  SqlValue message_type = sqlite_utils::SqliteValueToSqlValue(argv[1]);
 
-  if (proto.type != SqlValue::Type::kBytes) {
-    sqlite3_result_error(ctx, "UNWRAP_METRIC_PROTO: proto is not a blob", -1);
-    return;
-  }
+  if (proto.type != SqlValue::Type::kBytes)
+    return base::ErrStatus("UNWRAP_METRIC_PROTO: proto is not a blob");
 
-  if (message_type.type != SqlValue::Type::kString) {
-    sqlite3_result_error(ctx, "UNWRAP_METRIC_PROTO: message type is not string",
-                         -1);
-    return;
-  }
+  if (message_type.type != SqlValue::Type::kString)
+    return base::ErrStatus("UNWRAP_METRIC_PROTO: message type is not string");
 
   const uint8_t* ptr = static_cast<const uint8_t*>(proto.AsBytes());
   size_t size = proto.bytes_count;
   if (size == 0) {
-    sqlite3_result_blob(ctx, "", 0, nullptr);
-    return;
+    destructors.bytes_destructor = sqlite_utils::kSqliteStatic;
+    out = SqlValue::Bytes("", 0);
+    return base::OkStatus();
   }
 
   static constexpr uint32_t kMessageType =
@@ -750,17 +700,17 @@
   protozero::ConstBytes bytes;
   base::Status validation = ValidateSingleNonEmptyMessage(
       ptr, size, kMessageType, message_type.AsString(), &bytes);
-  if (!validation.ok()) {
-    base::Status res =
-        util::ErrStatus("UNWRAP_METRICS_PROTO: %s", validation.c_message());
-    sqlite3_result_error(ctx, res.c_message(), -1);
-    return;
-  }
+  if (!validation.ok())
+    return base::ErrStatus("UNWRAP_METRICS_PROTO: %s", validation.c_message());
 
   std::unique_ptr<uint8_t[], base::FreeDeleter> data(
       static_cast<uint8_t*>(malloc(bytes.size)));
   memcpy(data.get(), bytes.data, bytes.size);
-  sqlite3_result_blob(ctx, data.release(), static_cast<int>(bytes.size), free);
+
+  destructors.bytes_destructor = free;
+  out = SqlValue::Bytes(data.release(), bytes.size);
+
+  return base::OkStatus();
 }
 
 base::Status ComputeMetrics(TraceProcessor* tp,
diff --git a/src/trace_processor/metrics/metrics.h b/src/trace_processor/metrics/metrics.h
index c6cdb35..3292e30 100644
--- a/src/trace_processor/metrics/metrics.h
+++ b/src/trace_processor/metrics/metrics.h
@@ -27,6 +27,7 @@
 #include "perfetto/protozero/message.h"
 #include "perfetto/protozero/scattered_heap_buffer.h"
 #include "perfetto/trace_processor/trace_processor.h"
+#include "src/trace_processor/sqlite/register_function.h"
 #include "src/trace_processor/util/descriptors.h"
 
 #include "protos/perfetto/trace_processor/metrics_impl.pbzero.h"
@@ -149,34 +150,54 @@
     std::string* out);
 
 // Implements the NULL_IF_EMPTY SQL function.
-void NullIfEmpty(sqlite3_context* ctx, int argc, sqlite3_value** argv);
+struct NullIfEmpty : public SqlFunction {
+  static base::Status Run(void* ctx,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors&);
+};
+
+// Implements all the proto creation functions.
+struct BuildProto : public SqlFunction {
+  struct Context {
+    TraceProcessor* tp;
+    const DescriptorPool* pool;
+    const ProtoDescriptor* desc;
+  };
+  static base::Status Run(Context* ctx,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors&);
+};
+
+// Implements the RUN_METRIC SQL function.
+struct RunMetric : public SqlFunction {
+  struct Context {
+    TraceProcessor* tp;
+    std::vector<SqlMetricFile>* metrics;
+  };
+  static base::Status Run(Context* ctx,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors&);
+};
+
+// Implements the UNWRAP_METRIC_PROTO SQL function.
+struct UnwrapMetricProto : public SqlFunction {
+  static base::Status Run(Context* ctx,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors&);
+};
 
 // These functions implement the RepeatedField SQL aggregate functions.
 void RepeatedFieldStep(sqlite3_context* ctx, int argc, sqlite3_value** argv);
 void RepeatedFieldFinal(sqlite3_context* ctx);
 
-// Context struct for the below function.
-struct BuildProtoContext {
-  TraceProcessor* tp;
-  const DescriptorPool* pool;
-  const ProtoDescriptor* desc;
-};
-
-// This function implements all the proto creation functions.
-void BuildProto(sqlite3_context* ctx, int argc, sqlite3_value** argv);
-
-// Context struct for the below function.
-struct RunMetricContext {
-  TraceProcessor* tp;
-  std::vector<SqlMetricFile>* metrics;
-};
-
-// Implements the RUN_METRIC SQL function.
-void RunMetric(sqlite3_context* ctx, int argc, sqlite3_value** argv);
-
-// Implements the UNWRAP_METRIC_PROTO SQL function.
-void UnwrapMetricProto(sqlite3_context* ctx, int argc, sqlite3_value** argv);
-
 base::Status ComputeMetrics(TraceProcessor* impl,
                             const std::vector<std::string> metrics_to_compute,
                             const std::vector<SqlMetricFile>& metrics,
diff --git a/src/trace_processor/sqlite/BUILD.gn b/src/trace_processor/sqlite/BUILD.gn
index e19fbbf..af1adbe 100644
--- a/src/trace_processor/sqlite/BUILD.gn
+++ b/src/trace_processor/sqlite/BUILD.gn
@@ -22,6 +22,8 @@
       "query_cache.h",
       "query_constraints.cc",
       "query_constraints.h",
+      "register_function.cc",
+      "register_function.h",
       "scoped_db.h",
       "span_join_operator_table.cc",
       "span_join_operator_table.h",
diff --git a/src/trace_processor/sqlite/db_sqlite_table.cc b/src/trace_processor/sqlite/db_sqlite_table.cc
index 8dc57f9..bc2ee69 100644
--- a/src/trace_processor/sqlite/db_sqlite_table.cc
+++ b/src/trace_processor/sqlite/db_sqlite_table.cc
@@ -574,35 +574,15 @@
   SqlValue value = mode_ == Mode::kSingleRow
                        ? SourceTable()->GetColumn(column).Get(*single_row_)
                        : iterator_->Get(column);
-  switch (value.type) {
-    case SqlValue::Type::kLong:
-      sqlite3_result_int64(ctx, value.long_value);
-      break;
-    case SqlValue::Type::kDouble:
-      sqlite3_result_double(ctx, value.double_value);
-      break;
-    case SqlValue::Type::kString: {
-      // We can say kSqliteStatic here because all strings are expected to
-      // come from the string pool and thus will be valid for the lifetime
-      // of trace processor.
-      sqlite3_result_text(ctx, value.string_value, -1,
-                          sqlite_utils::kSqliteStatic);
-      break;
-    }
-    case SqlValue::Type::kBytes: {
-      // We can say kSqliteStatic here because for our iterator will hold
-      // onto the pointer as long as we don't call Next() but that only
-      // happens with Next() is called on the Cursor itself at which point
-      // SQLite no longer cares about the bytes pointer.
-      sqlite3_result_blob(ctx, value.bytes_value,
-                          static_cast<int>(value.bytes_count),
-                          sqlite_utils::kSqliteStatic);
-      break;
-    }
-    case SqlValue::Type::kNull:
-      sqlite3_result_null(ctx);
-      break;
-  }
+  // We can say kSqliteStatic for strings because all strings are expected to
+  // come from the string pool and thus will be valid for the lifetime
+  // of trace processor.
+  // Similarly, for bytes we can also use kSqliteStatic because our iterator
+  // will hold onto the pointer as long as we don't call Next(), but that only
+  // happens when Next() is called on the Cursor itself, at which point
+  // SQLite no longer cares about the bytes pointer.
+  sqlite_utils::ReportSqlValue(ctx, value, sqlite_utils::kSqliteStatic,
+                               sqlite_utils::kSqliteStatic);
   return SQLITE_OK;
 }
 
diff --git a/src/trace_processor/sqlite/register_function.cc b/src/trace_processor/sqlite/register_function.cc
new file mode 100644
index 0000000..431e4d6
--- /dev/null
+++ b/src/trace_processor/sqlite/register_function.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/sqlite/register_function.h"
+#include "sqlite3.h"
+#include "src/trace_processor/sqlite/sqlite_utils.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+base::Status SqlFunction::Cleanup(Context*) {
+  return base::OkStatus();
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/sqlite/register_function.h b/src/trace_processor/sqlite/register_function.h
new file mode 100644
index 0000000..0df7dcc
--- /dev/null
+++ b/src/trace_processor/sqlite/register_function.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_SQLITE_REGISTER_FUNCTION_H_
+#define SRC_TRACE_PROCESSOR_SQLITE_REGISTER_FUNCTION_H_
+
+#include <sqlite3.h>
+#include <cstddef>
+#include <memory>
+#include <set>
+
+#include "perfetto/base/status.h"
+#include "perfetto/trace_processor/basic_types.h"
+#include "src/trace_processor/sqlite/scoped_db.h"
+#include "src/trace_processor/sqlite/sqlite_utils.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+// Prototype for a C++ function which can be registered with SQLite.
+//
+// Usage
+//
+// Define a subclass of this struct as follows:
+// struct YourFunction : public SqlFunction {
+//   // Optional if you want a custom context object (i.e. an object
+//   // passed in at registration time which will be passed to Run on
+//   // every invocation)
+//   struct YourContext { /* define context fields here */ };
+//
+//   static base::Status Run(/* see parameters below */) {
+//     /* function body here */
+//   }
+//
+//   static base::Status Cleanup(/* see parameters below */) {
+//     /* function body here */
+//   }
+// }
+//
+// Then, register this function with SQLite using RegisterFunction (see below);
+// you'll likely want to do this in TraceProcessorImpl:
+// RegisterFunction<YourFunction>(/* see arguments below */)
+struct SqlFunction {
+  // The type of the context object which will be passed to the function.
+  // Can be redefined in any sub-classes to override the context.
+  using Context = void;
+
+  // Struct which holds destructors for strings/bytes returned from the
+  // function. Passed as an argument to |Run| to allow implementations to
+  // override the destructors.
+  struct Destructors {
+    sqlite3_destructor_type string_destructor = sqlite_utils::kSqliteTransient;
+    sqlite3_destructor_type bytes_destructor = sqlite_utils::kSqliteTransient;
+  };
+
+  // The function which will be executed with the arguments from SQL.
+  //
+  // Implementations MUST define this function themselves; this function is
+  // declared but *not* defined so linker errors will be thrown if not defined.
+  //
+  // |ctx|:         the context object passed at registration time.
+  // |argc|:        number of arguments.
+  // |argv|:        arguments to the function.
+  // |out|:         the return value of the function.
+  // |destructors|: destructors for string/bytes return values.
+  static base::Status Run(Context* ctx,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors& destructors);
+
+  // Executed after the result from |Run| is reported to SQLite.
+  // Allows any pending state to be cleaned up post-copy of results by SQLite.
+  //
+  // Implementations do not need to define this function; a default no-op
+  // implementation will be used in this case.
+  static base::Status Cleanup(Context*);
+};
+
+// Registers a C++ function to be runnable from SQL.
+// The format of the function is given by the |SqlFunction|; see the
+// documentation above.
+//
+// |db|:          sqlite3 database object
+// |name|:        name of the function in SQL
+// |argc|:        number of arguments for this function, -1 if variable
+// |ctx|:         context object for the function (see SqlFunction::Run above);
+//                this object *must* outlive the function so should likely be
+//                either static or scoped to the lifetime of TraceProcessor.
+// |deterministic|: whether this function has deterministic output given the
+//                  same set of arguments.
+template <typename Function>
+base::Status RegisterSqlFunction(sqlite3* db,
+                                 const char* name,
+                                 int argc,
+                                 typename Function::Context* ctx,
+                                 bool deterministic = true);
+
+// Same as above except allows a unique_ptr to be passed for the context; this
+// allows for SQLite to manage the lifetime of this pointer instead of the
+// essentially static requirement of the context pointer above.
+template <typename Function>
+base::Status RegisterSqlFunction(
+    sqlite3* db,
+    const char* name,
+    int argc,
+    std::unique_ptr<typename Function::Context> ctx,
+    bool deterministic = true);
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+// The rest of this file is just implementation details which we need
+// in the header file because it is templated code. We separate it out
+// like this to keep the API people actually care about easy to read.
+
+namespace perfetto {
+namespace trace_processor {
+
+namespace sqlite_internal {
+template <typename Function>
+void WrapSqlFunction(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
+  using Context = typename Function::Context;
+  auto* ud = static_cast<Context*>(sqlite3_user_data(ctx));
+
+  SqlValue value{};
+  SqlFunction::Destructors destructors{};
+  base::Status status =
+      Function::Run(ud, static_cast<size_t>(argc), argv, value, destructors);
+
+  if (!status.ok()) {
+    sqlite3_result_error(ctx, status.c_message(), -1);
+    return;
+  }
+  sqlite_utils::ReportSqlValue(ctx, value, destructors.string_destructor,
+                               destructors.bytes_destructor);
+
+  status = Function::Cleanup(ud);
+  if (!status.ok()) {
+    sqlite3_result_error(ctx, status.c_message(), -1);
+    return;
+  }
+}
+}  // namespace sqlite_internal
+
+template <typename Function>
+base::Status RegisterSqlFunction(sqlite3* db,
+                                 const char* name,
+                                 int argc,
+                                 typename Function::Context* ctx,
+                                 bool deterministic) {
+  int flags = SQLITE_UTF8 | (deterministic ? SQLITE_DETERMINISTIC : 0);
+  int ret = sqlite3_create_function_v2(
+      db, name, static_cast<int>(argc), flags, ctx,
+      sqlite_internal::WrapSqlFunction<Function>, nullptr, nullptr, nullptr);
+  if (ret != SQLITE_OK) {
+    return base::ErrStatus("Unable to register function with name %s", name);
+  }
+  return base::OkStatus();
+}
+
+template <typename Function>
+base::Status RegisterSqlFunction(
+    sqlite3* db,
+    const char* name,
+    int argc,
+    std::unique_ptr<typename Function::Context> user_data,
+    bool deterministic) {
+  int flags = SQLITE_UTF8 | (deterministic ? SQLITE_DETERMINISTIC : 0);
+  int ret = sqlite3_create_function_v2(
+      db, name, static_cast<int>(argc), flags, user_data.release(),
+      sqlite_internal::WrapSqlFunction<Function>, nullptr, nullptr,
+      [](void* ptr) { delete static_cast<typename Function::Context*>(ptr); });
+  if (ret != SQLITE_OK) {
+    return base::ErrStatus("Unable to register function with name %s", name);
+  }
+  return base::OkStatus();
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // SRC_TRACE_PROCESSOR_SQLITE_REGISTER_FUNCTION_H_
diff --git a/src/trace_processor/sqlite/sqlite_utils.h b/src/trace_processor/sqlite/sqlite_utils.h
index 750fb5c..66a3ed6 100644
--- a/src/trace_processor/sqlite/sqlite_utils.h
+++ b/src/trace_processor/sqlite/sqlite_utils.h
@@ -27,6 +27,7 @@
 #include "perfetto/base/logging.h"
 #include "perfetto/ext/base/optional.h"
 #include "perfetto/ext/base/string_utils.h"
+#include "perfetto/trace_processor/basic_types.h"
 #include "src/trace_processor/sqlite/scoped_db.h"
 #include "src/trace_processor/sqlite/sqlite_table.h"
 
@@ -37,327 +38,95 @@
 const auto kSqliteStatic = reinterpret_cast<sqlite3_destructor_type>(0);
 const auto kSqliteTransient = reinterpret_cast<sqlite3_destructor_type>(-1);
 
-template <typename T>
-using is_numeric =
-    typename std::enable_if<std::is_arithmetic<T>::value, T>::type;
-
-template <typename T>
-using is_float =
-    typename std::enable_if<std::is_floating_point<T>::value, T>::type;
-
-template <typename T>
-using is_int = typename std::enable_if<std::is_integral<T>::value, T>::type;
-
 inline bool IsOpEq(int op) {
   return op == SQLITE_INDEX_CONSTRAINT_EQ;
 }
 
-inline bool IsOpGe(int op) {
-  return op == SQLITE_INDEX_CONSTRAINT_GE;
-}
-
-inline bool IsOpGt(int op) {
-  return op == SQLITE_INDEX_CONSTRAINT_GT;
-}
-
 inline bool IsOpLe(int op) {
   return op == SQLITE_INDEX_CONSTRAINT_LE;
 }
 
-inline bool IsOpLt(int op) {
-  return op == SQLITE_INDEX_CONSTRAINT_LT;
+inline SqlValue::Type SqliteTypeToSqlValueType(int sqlite_type) {
+  switch (sqlite_type) {
+    case SQLITE_NULL:
+      return SqlValue::Type::kNull;
+    case SQLITE_BLOB:
+      return SqlValue::Type::kBytes;
+    case SQLITE_INTEGER:
+      return SqlValue::Type::kLong;
+    case SQLITE_FLOAT:
+      return SqlValue::Type::kDouble;
+    case SQLITE_TEXT:
+      return SqlValue::Type::kString;
+  }
+  PERFETTO_FATAL("Unknown SQLite type %d", sqlite_type);
 }
 
-inline bool IsOpIsNull(int op) {
-  return op == SQLITE_INDEX_CONSTRAINT_ISNULL;
+inline SqlValue SqliteValueToSqlValue(sqlite3_value* value) {
+  SqlValue sql_value;
+  switch (sqlite3_value_type(value)) {
+    case SQLITE_INTEGER:
+      sql_value.type = SqlValue::Type::kLong;
+      sql_value.long_value = sqlite3_value_int64(value);
+      break;
+    case SQLITE_FLOAT:
+      sql_value.type = SqlValue::Type::kDouble;
+      sql_value.double_value = sqlite3_value_double(value);
+      break;
+    case SQLITE_TEXT:
+      sql_value.type = SqlValue::Type::kString;
+      sql_value.string_value =
+          reinterpret_cast<const char*>(sqlite3_value_text(value));
+      break;
+    case SQLITE_BLOB:
+      sql_value.type = SqlValue::Type::kBytes;
+      sql_value.bytes_value = sqlite3_value_blob(value);
+      sql_value.bytes_count = static_cast<size_t>(sqlite3_value_bytes(value));
+      break;
+  }
+  return sql_value;
 }
 
-inline bool IsOpIsNotNull(int op) {
-  return op == SQLITE_INDEX_CONSTRAINT_ISNOTNULL;
+inline base::Optional<std::string> SqlValueToString(SqlValue value) {
+  switch (value.type) {
+    case SqlValue::Type::kString:
+      return value.AsString();
+    case SqlValue::Type::kDouble:
+      return std::to_string(value.AsDouble());
+    case SqlValue::Type::kLong:
+      return std::to_string(value.AsLong());
+    case SqlValue::Type::kBytes:
+    case SqlValue::Type::kNull:
+      return base::nullopt;
+  }
+  PERFETTO_FATAL("For GCC");
 }
 
-template <typename T>
-T ExtractSqliteValue(sqlite3_value* value);
-
-template <>
-inline uint8_t ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_INTEGER);
-  return static_cast<uint8_t>(sqlite3_value_int(value));
-}
-
-template <>
-inline uint32_t ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_INTEGER);
-  return static_cast<uint32_t>(sqlite3_value_int64(value));
-}
-
-template <>
-inline int32_t ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_INTEGER);
-  return sqlite3_value_int(value);
-}
-
-template <>
-inline int64_t ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_INTEGER);
-  return static_cast<int64_t>(sqlite3_value_int64(value));
-}
-
-template <>
-inline double ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_FLOAT || type == SQLITE_INTEGER);
-  return sqlite3_value_double(value);
-}
-
-template <>
-inline bool ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_INTEGER);
-  return static_cast<bool>(sqlite3_value_int(value));
-}
-
-// Do not add a uint64_t version of ExtractSqliteValue. You should not be using
-// uint64_t at all given that SQLite doesn't support it.
-
-template <>
-inline const char* ExtractSqliteValue(sqlite3_value* value) {
-  auto type = sqlite3_value_type(value);
-  PERFETTO_DCHECK(type == SQLITE_TEXT);
-  return reinterpret_cast<const char*>(sqlite3_value_text(value));
-}
-
-template <>
-inline std::string ExtractSqliteValue(sqlite3_value* value) {
-  return ExtractSqliteValue<const char*>(value);
-}
-
-template <typename T>
-class NumericPredicate {
- public:
-  NumericPredicate(int op, T constant) : op_(op), constant_(constant) {}
-
-  PERFETTO_ALWAYS_INLINE bool operator()(T other) const {
-    switch (op_) {
-      case SQLITE_INDEX_CONSTRAINT_ISNULL:
-        return false;
-      case SQLITE_INDEX_CONSTRAINT_ISNOTNULL:
-        return true;
-      case SQLITE_INDEX_CONSTRAINT_EQ:
-      case SQLITE_INDEX_CONSTRAINT_IS:
-        return std::equal_to<T>()(other, constant_);
-      case SQLITE_INDEX_CONSTRAINT_NE:
-      case SQLITE_INDEX_CONSTRAINT_ISNOT:
-        return std::not_equal_to<T>()(other, constant_);
-      case SQLITE_INDEX_CONSTRAINT_GE:
-        return std::greater_equal<T>()(other, constant_);
-      case SQLITE_INDEX_CONSTRAINT_GT:
-        return std::greater<T>()(other, constant_);
-      case SQLITE_INDEX_CONSTRAINT_LE:
-        return std::less_equal<T>()(other, constant_);
-      case SQLITE_INDEX_CONSTRAINT_LT:
-        return std::less<T>()(other, constant_);
-      default:
-        PERFETTO_FATAL("For GCC");
+inline void ReportSqlValue(
+    sqlite3_context* ctx,
+    const SqlValue& value,
+    sqlite3_destructor_type string_destructor = kSqliteTransient,
+    sqlite3_destructor_type bytes_destructor = kSqliteTransient) {
+  switch (value.type) {
+    case SqlValue::Type::kLong:
+      sqlite3_result_int64(ctx, value.long_value);
+      break;
+    case SqlValue::Type::kDouble:
+      sqlite3_result_double(ctx, value.double_value);
+      break;
+    case SqlValue::Type::kString: {
+      sqlite3_result_text(ctx, value.string_value, -1, string_destructor);
+      break;
     }
+    case SqlValue::Type::kBytes:
+      sqlite3_result_blob(ctx, value.bytes_value,
+                          static_cast<int>(value.bytes_count),
+                          bytes_destructor);
+      break;
+    case SqlValue::Type::kNull:
+      sqlite3_result_null(ctx);
+      break;
   }
-
- private:
-  int op_;
-  T constant_;
-};
-
-template <typename T, typename sqlite_utils::is_numeric<T>* = nullptr>
-NumericPredicate<T> CreateNumericPredicate(int op, sqlite3_value* value) {
-  T extracted =
-      IsOpIsNull(op) || IsOpIsNotNull(op) ? 0 : ExtractSqliteValue<T>(value);
-  return NumericPredicate<T>(op, extracted);
-}
-
-inline std::function<bool(const char*)> CreateStringPredicate(
-    int op,
-    sqlite3_value* value) {
-  switch (op) {
-    case SQLITE_INDEX_CONSTRAINT_ISNULL:
-      return [](const char* f) { return f == nullptr; };
-    case SQLITE_INDEX_CONSTRAINT_ISNOTNULL:
-      return [](const char* f) { return f != nullptr; };
-  }
-
-  const char* val = reinterpret_cast<const char*>(sqlite3_value_text(value));
-
-  // If the value compared against is null, then to stay consistent with SQL
-  // handling, we have to return false for non-null operators.
-  if (val == nullptr) {
-    PERFETTO_CHECK(op != SQLITE_INDEX_CONSTRAINT_IS &&
-                   op != SQLITE_INDEX_CONSTRAINT_ISNOT);
-    return [](const char*) { return false; };
-  }
-
-  switch (op) {
-    case SQLITE_INDEX_CONSTRAINT_EQ:
-    case SQLITE_INDEX_CONSTRAINT_IS:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) == 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_NE:
-    case SQLITE_INDEX_CONSTRAINT_ISNOT:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) != 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_GE:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) >= 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_GT:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) > 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_LE:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) <= 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_LT:
-      return [val](const char* str) {
-        return str != nullptr && strcmp(str, val) < 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_LIKE:
-      return [val](const char* str) {
-        return str != nullptr && sqlite3_strlike(val, str, 0) == 0;
-      };
-    case SQLITE_INDEX_CONSTRAINT_GLOB:
-      return [val](const char* str) {
-        return str != nullptr && sqlite3_strglob(val, str) == 0;
-      };
-    default:
-      PERFETTO_FATAL("For GCC");
-  }
-}
-
-// Greater bound for floating point numbers.
-template <typename T, typename sqlite_utils::is_float<T>* = nullptr>
-T FindGtBound(bool is_eq, sqlite3_value* sqlite_val) {
-  constexpr auto kMax = static_cast<long double>(std::numeric_limits<T>::max());
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type != SQLITE_INTEGER && type != SQLITE_FLOAT) {
-    return kMax;
-  }
-
-  // If this is a strict gt bound then just get the next highest float
-  // after value.
-  auto value = ExtractSqliteValue<T>(sqlite_val);
-  return is_eq ? value : nexttoward(value, kMax);
-}
-
-template <typename T, typename sqlite_utils::is_int<T>* = nullptr>
-T FindGtBound(bool is_eq, sqlite3_value* sqlite_val) {
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type == SQLITE_INTEGER) {
-    auto value = ExtractSqliteValue<T>(sqlite_val);
-    return is_eq ? value : value + 1;
-  } else if (type == SQLITE_FLOAT) {
-    auto value = ExtractSqliteValue<double>(sqlite_val);
-    auto above = ceil(value);
-    auto cast = static_cast<T>(above);
-    return value < above ? cast : (is_eq ? cast : cast + 1);
-  } else {
-    return std::numeric_limits<T>::max();
-  }
-}
-
-template <typename T, typename sqlite_utils::is_float<T>* = nullptr>
-T FindLtBound(bool is_eq, sqlite3_value* sqlite_val) {
-  constexpr auto kMin =
-      static_cast<long double>(std::numeric_limits<T>::lowest());
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type != SQLITE_INTEGER && type != SQLITE_FLOAT) {
-    return kMin;
-  }
-
-  // If this is a strict lt bound then just get the next lowest float
-  // before value.
-  auto value = ExtractSqliteValue<T>(sqlite_val);
-  return is_eq ? value : nexttoward(value, kMin);
-}
-
-template <typename T, typename sqlite_utils::is_int<T>* = nullptr>
-T FindLtBound(bool is_eq, sqlite3_value* sqlite_val) {
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type == SQLITE_INTEGER) {
-    auto value = ExtractSqliteValue<T>(sqlite_val);
-    return is_eq ? value : value - 1;
-  } else if (type == SQLITE_FLOAT) {
-    auto value = ExtractSqliteValue<double>(sqlite_val);
-    auto below = floor(value);
-    auto cast = static_cast<T>(below);
-    return value > below ? cast : (is_eq ? cast : cast - 1);
-  } else {
-    return std::numeric_limits<T>::max();
-  }
-}
-
-template <typename T, typename sqlite_utils::is_float<T>* = nullptr>
-T FindEqBound(sqlite3_value* sqlite_val) {
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type != SQLITE_INTEGER && type != SQLITE_FLOAT) {
-    return std::numeric_limits<T>::max();
-  }
-  return ExtractSqliteValue<T>(sqlite_val);
-}
-
-template <typename T, typename sqlite_utils::is_int<T>* = nullptr>
-T FindEqBound(sqlite3_value* sqlite_val) {
-  auto type = sqlite3_value_type(sqlite_val);
-  if (type == SQLITE_INTEGER) {
-    return ExtractSqliteValue<T>(sqlite_val);
-  } else if (type == SQLITE_FLOAT) {
-    auto value = ExtractSqliteValue<double>(sqlite_val);
-    auto below = floor(value);
-    auto cast = static_cast<T>(below);
-    return value > below ? std::numeric_limits<T>::max() : cast;
-  } else {
-    return std::numeric_limits<T>::max();
-  }
-}
-
-template <typename T>
-void ReportSqliteResult(sqlite3_context*, T value);
-
-// Do not add a uint64_t version of ReportSqliteResult. You should not be using
-// uint64_t at all given that SQLite doesn't support it.
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, int32_t value) {
-  sqlite3_result_int(ctx, value);
-}
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, int64_t value) {
-  sqlite3_result_int64(ctx, value);
-}
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, uint8_t value) {
-  sqlite3_result_int(ctx, value);
-}
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, uint32_t value) {
-  sqlite3_result_int64(ctx, value);
-}
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, bool value) {
-  sqlite3_result_int(ctx, value);
-}
-
-template <>
-inline void ReportSqliteResult(sqlite3_context* ctx, double value) {
-  sqlite3_result_double(ctx, value);
 }
 
 inline util::Status GetColumnsForTable(
@@ -409,7 +178,8 @@
     } else if (base::CaseInsensitiveEqual(raw_type, "BIG INT") ||
                base::CaseInsensitiveEqual(raw_type, "UNSIGNED INT") ||
                base::CaseInsensitiveEqual(raw_type, "INT") ||
-               base::CaseInsensitiveEqual(raw_type, "BOOLEAN")) {
+               base::CaseInsensitiveEqual(raw_type, "BOOLEAN") ||
+               base::CaseInsensitiveEqual(raw_type, "INTEGER")) {
       type = SqlValue::Type::kLong;
     } else if (!*raw_type) {
       PERFETTO_DLOG("Unknown column type for %s %s", raw_table_name.c_str(),
@@ -424,16 +194,6 @@
   return util::OkStatus();
 }
 
-template <typename T>
-int CompareValuesAsc(const T& f, const T& s) {
-  return f < s ? -1 : (f > s ? 1 : 0);
-}
-
-template <typename T>
-int CompareValuesDesc(const T& f, const T& s) {
-  return -CompareValuesAsc(f, s);
-}
-
 }  // namespace sqlite_utils
 }  // namespace trace_processor
 }  // namespace perfetto
diff --git a/src/trace_processor/tables/metadata_tables.h b/src/trace_processor/tables/metadata_tables.h
index 3f1bb90..27fe0ef 100644
--- a/src/trace_processor/tables/metadata_tables.h
+++ b/src/trace_processor/tables/metadata_tables.h
@@ -58,12 +58,31 @@
 
 PERFETTO_TP_TABLE(PERFETTO_TP_METADATA_TABLE_DEF);
 
+// Contains information of threads seen during the trace
+//
 // @name thread
-// @param utid {uint32_t} Unique thread id. This is != the OS tid. This is a
-//        monotonic number associated to each thread. The OS thread id (tid)
-//        cannot be used as primary key because tids and pids are recycled
-//        by most kernels.
-// @param upid {@joinable process.upid}
+// @param utid             {uint32_t} Unique thread id. This is != the OS tid.
+//                         This is a monotonic number associated to each thread.
+//                         The OS thread id (tid) cannot be used as primary key
+//                         because tids and pids are recycled by most kernels.
+// @param tid              The OS id for this thread. Note: this is *not*
+//                         unique over the lifetime of the trace so cannot be
+//                         used as a primary key. Use |utid| instead.
+// @param name             The name of the thread. Can be populated from many
+//                         sources (e.g. ftrace, /proc scraping, track event
+//                         etc).
+// @param start_ts         The start timestamp of this thread (if known). Is
+//                         null in most cases unless a thread creation event is
+//                         enabled (e.g. task_newtask ftrace event on
+//                         Linux/Android).
+// @param end_ts           The end timestamp of this thread (if known). Is
+//                         null in most cases unless a thread destruction event
+//                         is enabled (e.g. sched_process_free ftrace event on
+//                         Linux/Android).
+// @param upid             {@joinable process.upid} The process hosting this
+//                         thread.
+// @param is_main_thread   Boolean indicating if this thread is the main thread
+//                         in the process.
 #define PERFETTO_TP_THREAD_TABLE_DEF(NAME, PARENT, C) \
   NAME(ThreadTable, "internal_thread")                \
   PERFETTO_TP_ROOT_TABLE(PARENT, C)                   \
@@ -76,12 +95,35 @@
 
 PERFETTO_TP_TABLE(PERFETTO_TP_THREAD_TABLE_DEF);
 
+// Contains information about processes seen during the trace
+//
 // @name process
-// @param upid {uint32_t} Unique process id. This is != the OS pid. This is a
-//        monotonic number associated to each process. The OS process id (pid)
-//        cannot be used as primary key because tids and pids are recycled by
-//        most kernels.
-// @param uid The Unix user id of the process {@joinable package_list.uid}.
+// @param upid            {uint32_t} Unique process id. This is != the OS pid.
+//                        This is a monotonic number associated to each process.
+//                        The OS process id (pid) cannot be used as primary key
+//                        because tids and pids are recycled by most kernels.
+// @param pid             The OS id for this process. Note: this is *not*
+//                        unique over the lifetime of the trace so cannot be
+//                        used as a primary key. Use |upid| instead.
+// @param name            The name of the process. Can be populated from many
+//                        sources (e.g. ftrace, /proc scraping, track event
+//                        etc).
+// @param start_ts        The start timestamp of this process (if known). Is
+//                        null in most cases unless a process creation event is
+//                        enabled (e.g. task_newtask ftrace event on
+//                        Linux/Android).
+// @param end_ts          The end timestamp of this process (if known). Is
+//                        null in most cases unless a process destruction event
+//                        is enabled (e.g. sched_process_free ftrace event on
+//                        Linux/Android).
+// @param parent_upid     {@joinable process.upid} The upid of the process which
+//                        caused this process to be spawned.
+// @param uid             {@joinable package_list.uid} The Unix user id of the
+//                        process.
+// @param android_appid   Android appid of this process.
+// @param cmdline         /proc/cmdline for this process.
+// @param arg_set_id      {@joinable args.arg_set_id} Extra args for this
+//                        process.
 #define PERFETTO_TP_PROCESS_TABLE_DEF(NAME, PARENT, C) \
   NAME(ProcessTable, "internal_process")               \
   PERFETTO_TP_ROOT_TABLE(PARENT, C)                    \
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index 6d02dfa..97cc591 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -18,8 +18,10 @@
 
 #include <algorithm>
 #include <cinttypes>
+#include <memory>
 
 #include "perfetto/base/logging.h"
+#include "perfetto/base/status.h"
 #include "perfetto/base/time.h"
 #include "perfetto/ext/base/string_splitter.h"
 #include "perfetto/ext/base/string_utils.h"
@@ -45,6 +47,7 @@
 #include "src/trace_processor/importers/proto/metadata_tracker.h"
 #include "src/trace_processor/importers/systrace/systrace_trace_parser.h"
 #include "src/trace_processor/iterator_impl.h"
+#include "src/trace_processor/sqlite/register_function.h"
 #include "src/trace_processor/sqlite/span_join_operator_table.h"
 #include "src/trace_processor/sqlite/sql_stats_table.h"
 #include "src/trace_processor/sqlite/sqlite3_str_split.h"
@@ -56,6 +59,7 @@
 #include "src/trace_processor/tp_metatrace.h"
 #include "src/trace_processor/types/variadic.h"
 #include "src/trace_processor/util/protozero_to_text.h"
+#include "src/trace_processor/util/status_macros.h"
 
 #include "protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h"
 #include "protos/perfetto/trace/trace.pbzero.h"
@@ -87,6 +91,18 @@
     "SELECT tbl_name, type FROM (SELECT * FROM sqlite_master UNION ALL SELECT "
     "* FROM sqlite_temp_master)";
 
+template <typename SqlFunction, typename Ptr = typename SqlFunction::Context*>
+void RegisterFunction(sqlite3* db,
+                      const char* name,
+                      int argc,
+                      Ptr context = nullptr,
+                      bool deterministic = true) {
+  auto status = RegisterSqlFunction<SqlFunction>(
+      db, name, argc, std::move(context), deterministic);
+  if (!status.ok())
+    PERFETTO_ELOG("%s", status.c_message());
+}
+
 void InitializeSqlite(sqlite3* db) {
   char* error = nullptr;
   sqlite3_exec(db, "PRAGMA temp_store=2", 0, 0, &error);
@@ -287,47 +303,57 @@
   }
 }
 
-void ExportJson(sqlite3_context* ctx, int /*argc*/, sqlite3_value** argv) {
-  TraceStorage* storage = static_cast<TraceStorage*>(sqlite3_user_data(ctx));
+struct ExportJson : public SqlFunction {
+  using Context = TraceStorage;
+  static base::Status Run(TraceStorage* storage,
+                          size_t /*argc*/,
+                          sqlite3_value** argv,
+                          SqlValue& /*out*/,
+                          Destructors&);
+};
+
+base::Status ExportJson::Run(TraceStorage* storage,
+                             size_t /*argc*/,
+                             sqlite3_value** argv,
+                             SqlValue& /*out*/,
+                             Destructors&) {
   FILE* output;
   if (sqlite3_value_type(argv[0]) == SQLITE_INTEGER) {
     // Assume input is an FD.
     output = fdopen(sqlite3_value_int(argv[0]), "w");
     if (!output) {
-      sqlite3_result_error(ctx, "Couldn't open output file from given FD", -1);
-      return;
+      return base::ErrStatus(
+          "EXPORT_JSON: Couldn't open output file from given FD");
     }
   } else {
     const char* filename =
         reinterpret_cast<const char*>(sqlite3_value_text(argv[0]));
     output = fopen(filename, "w");
     if (!output) {
-      sqlite3_result_error(ctx, "Couldn't open output file", -1);
-      return;
+      return base::ErrStatus("EXPORT_JSON: Couldn't open output file");
     }
   }
-
-  util::Status result = json::ExportJson(storage, output);
-  if (!result.ok()) {
-    sqlite3_result_error(ctx, result.message().c_str(), -1);
-    return;
-  }
+  return json::ExportJson(storage, output);
 }
 
-void CreateJsonExportFunction(TraceStorage* ts, sqlite3* db) {
-  auto ret = sqlite3_create_function_v2(db, "EXPORT_JSON", 1, SQLITE_UTF8, ts,
-                                        ExportJson, nullptr, nullptr,
-                                        sqlite_utils::kSqliteStatic);
-  if (ret) {
-    PERFETTO_ELOG("Error initializing EXPORT_JSON");
-  }
-}
+struct Hash : public SqlFunction {
+  static base::Status Run(void*,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors&);
+};
 
-void Hash(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
+base::Status Hash::Run(void*,
+                       size_t argc,
+                       sqlite3_value** argv,
+                       SqlValue& out,
+                       Destructors&) {
   base::Hash hash;
-  for (int i = 0; i < argc; ++i) {
+  for (size_t i = 0; i < argc; ++i) {
     sqlite3_value* value = argv[i];
-    switch (sqlite3_value_type(value)) {
+    int type = sqlite3_value_type(value);
+    switch (type) {
       case SQLITE_INTEGER:
         hash.Update(sqlite3_value_int64(value));
         break;
@@ -338,41 +364,50 @@
         break;
       }
       default:
-        sqlite3_result_error(ctx, "Unsupported type of arg passed to HASH", -1);
-        return;
+        return base::ErrStatus("HASH: arg %zu has unknown type %d", i, type);
     }
   }
-  sqlite3_result_int64(ctx, static_cast<int64_t>(hash.digest()));
+  out = SqlValue::Long(static_cast<int64_t>(hash.digest()));
+  return base::OkStatus();
 }
 
-void Demangle(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
-  if (argc != 1) {
-    sqlite3_result_error(ctx, "Unsupported number of arg passed to DEMANGLE",
-                         -1);
-    return;
-  }
+struct Demangle : public SqlFunction {
+  static base::Status Run(void*,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors& destructors);
+};
+
+base::Status Demangle::Run(void*,
+                           size_t argc,
+                           sqlite3_value** argv,
+                           SqlValue& out,
+                           Destructors& destructors) {
+  if (argc != 1)
+    return base::ErrStatus("Unsupported number of arg passed to DEMANGLE");
   sqlite3_value* value = argv[0];
-  if (sqlite3_value_type(value) == SQLITE_NULL) {
-    sqlite3_result_null(ctx);
-    return;
-  }
-  if (sqlite3_value_type(value) != SQLITE_TEXT) {
-    sqlite3_result_error(ctx, "Unsupported type of arg passed to DEMANGLE", -1);
-    return;
-  }
+  if (sqlite3_value_type(value) == SQLITE_NULL)
+    return base::OkStatus();
+
+  if (sqlite3_value_type(value) != SQLITE_TEXT)
+    return base::ErrStatus("Unsupported type of arg passed to DEMANGLE");
+
   const char* ptr = reinterpret_cast<const char*>(sqlite3_value_text(value));
 #if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
   int ignored = 0;
   // This memory was allocated by malloc and will be passed to SQLite to free.
   char* demangled_name = abi::__cxa_demangle(ptr, nullptr, nullptr, &ignored);
-  if (!demangled_name) {
-    sqlite3_result_null(ctx);
-    return;
-  }
-  sqlite3_result_text(ctx, demangled_name, -1, free);
+  if (!demangled_name)
+    return base::OkStatus();
+
+  destructors.string_destructor = free;
+  out = SqlValue::String(demangled_name);
 #else
-  sqlite3_result_text(ctx, ptr, -1, sqlite_utils::kSqliteTransient);
+  destructors.string_destructor = sqlite_utils::kSqliteTransient;
+  out = SqlValue::String(ptr);
 #endif
+  return base::OkStatus();
 }
 
 void LastNonNullStep(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
@@ -423,25 +458,7 @@
   }
 }
 
-void CreateHashFunction(sqlite3* db) {
-  auto ret = sqlite3_create_function_v2(
-      db, "HASH", -1, SQLITE_UTF8 | SQLITE_DETERMINISTIC, nullptr, &Hash,
-      nullptr, nullptr, nullptr);
-  if (ret) {
-    PERFETTO_ELOG("Error initializing HASH");
-  }
-}
-
-void CreateDemangledNameFunction(sqlite3* db) {
-  auto ret = sqlite3_create_function_v2(
-      db, "DEMANGLE", 1, SQLITE_UTF8 | SQLITE_DETERMINISTIC, nullptr, &Demangle,
-      nullptr, nullptr, nullptr);
-  if (ret != SQLITE_OK) {
-    PERFETTO_ELOG("Error initializing DEMANGLE: %s", sqlite3_errmsg(db));
-  }
-}
-
-void CreateLastNonNullFunction(sqlite3* db) {
+void RegisterLastNonNullFunction(sqlite3* db) {
   auto ret = sqlite3_create_window_function(
       db, "LAST_NON_NULL", 1, SQLITE_UTF8 | SQLITE_DETERMINISTIC, nullptr,
       &LastNonNullStep, &LastNonNullFinal, &LastNonNullValue,
@@ -529,7 +546,7 @@
   }
 }
 
-void CreateValueAtMaxTsFunction(sqlite3* db) {
+void RegisterValueAtMaxTsFunction(sqlite3* db) {
   auto ret = sqlite3_create_function_v2(
       db, "VALUE_AT_MAX_TS", 2, SQLITE_UTF8 | SQLITE_DETERMINISTIC, nullptr,
       nullptr, &ValueAtMaxTsStep, &ValueAtMaxTsFinal, nullptr);
@@ -538,103 +555,73 @@
   }
 }
 
-void ExtractArg(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
-  if (argc != 2) {
-    sqlite3_result_error(ctx, "EXTRACT_ARG: 2 args required", -1);
-    return;
-  }
+struct ExtractArg : public SqlFunction {
+  using Context = TraceStorage;
+  static base::Status Run(TraceStorage* storage,
+                          size_t argc,
+                          sqlite3_value** argv,
+                          SqlValue& out,
+                          Destructors& destructors);
+};
+
+base::Status ExtractArg::Run(TraceStorage* storage,
+                             size_t argc,
+                             sqlite3_value** argv,
+                             SqlValue& out,
+                             Destructors& destructors) {
+  if (argc != 2)
+    return base::ErrStatus("EXTRACT_ARG: 2 args required");
 
   // If the arg set id is null, just return null as the result.
-  if (sqlite3_value_type(argv[0]) == SQLITE_NULL) {
-    sqlite3_result_null(ctx);
-    return;
-  }
-  if (sqlite3_value_type(argv[0]) != SQLITE_INTEGER) {
-    sqlite3_result_error(ctx, "EXTRACT_ARG: 1st argument should be arg set id",
-                         -1);
-    return;
-  }
-  if (sqlite3_value_type(argv[1]) != SQLITE_TEXT) {
-    sqlite3_result_error(ctx, "EXTRACT_ARG: 2nd argument should be key", -1);
-    return;
-  }
+  if (sqlite3_value_type(argv[0]) == SQLITE_NULL)
+    return base::OkStatus();
 
-  TraceStorage* storage = static_cast<TraceStorage*>(sqlite3_user_data(ctx));
+  if (sqlite3_value_type(argv[0]) != SQLITE_INTEGER)
+    return base::ErrStatus("EXTRACT_ARG: 1st argument should be arg set id");
+
+  if (sqlite3_value_type(argv[1]) != SQLITE_TEXT)
+    return base::ErrStatus("EXTRACT_ARG: 2nd argument should be key");
+
   uint32_t arg_set_id = static_cast<uint32_t>(sqlite3_value_int(argv[0]));
   const char* key = reinterpret_cast<const char*>(sqlite3_value_text(argv[1]));
 
   base::Optional<Variadic> opt_value;
-  util::Status status = storage->ExtractArg(arg_set_id, key, &opt_value);
-  if (!status.ok()) {
-    sqlite3_result_error(ctx, status.c_message(), -1);
-    return;
-  }
+  RETURN_IF_ERROR(storage->ExtractArg(arg_set_id, key, &opt_value));
 
-  if (!opt_value) {
-    sqlite3_result_null(ctx);
-    return;
-  }
+  if (!opt_value)
+    return base::OkStatus();
+
+  // This function always returns static strings (i.e. scoped to lifetime
+  // of the TraceStorage thread pool) so prevent SQLite from making copies.
+  destructors.string_destructor = sqlite_utils::kSqliteStatic;
 
   switch (opt_value->type) {
-    case Variadic::kInt:
-      sqlite3_result_int64(ctx, opt_value->int_value);
-      break;
-    case Variadic::kBool:
-      sqlite3_result_int64(ctx, opt_value->bool_value);
-      break;
-    case Variadic::kUint:
-      sqlite3_result_int64(ctx, static_cast<int64_t>(opt_value->uint_value));
-      break;
-    case Variadic::kPointer:
-      sqlite3_result_int64(ctx, static_cast<int64_t>(opt_value->pointer_value));
-      break;
-    case Variadic::kJson:
-      sqlite3_result_text(ctx, storage->GetString(opt_value->json_value).data(),
-                          -1, nullptr);
-      break;
-    case Variadic::kString:
-      sqlite3_result_text(
-          ctx, storage->GetString(opt_value->string_value).data(), -1, nullptr);
-      break;
-    case Variadic::kReal:
-      sqlite3_result_double(ctx, opt_value->real_value);
-      break;
     case Variadic::kNull:
-      sqlite3_result_null(ctx);
-      break;
+      return base::OkStatus();
+    case Variadic::kInt:
+      out = SqlValue::Long(opt_value->int_value);
+      return base::OkStatus();
+    case Variadic::kUint:
+      out = SqlValue::Long(static_cast<int64_t>(opt_value->uint_value));
+      return base::OkStatus();
+    case Variadic::kString:
+      out =
+          SqlValue::String(storage->GetString(opt_value->string_value).data());
+      return base::OkStatus();
+    case Variadic::kReal:
+      out = SqlValue::Double(opt_value->real_value);
+      return base::OkStatus();
+    case Variadic::kBool:
+      out = SqlValue::Long(opt_value->bool_value);
+      return base::OkStatus();
+    case Variadic::kPointer:
+      out = SqlValue::Long(static_cast<int64_t>(opt_value->pointer_value));
+      return base::OkStatus();
+    case Variadic::kJson:
+      out = SqlValue::String(storage->GetString(opt_value->json_value).data());
+      return base::OkStatus();
   }
-}
-
-void CreateExtractArgFunction(TraceStorage* ts, sqlite3* db) {
-  auto ret = sqlite3_create_function_v2(db, "EXTRACT_ARG", 2,
-                                        SQLITE_UTF8 | SQLITE_DETERMINISTIC, ts,
-                                        &ExtractArg, nullptr, nullptr, nullptr);
-  if (ret != SQLITE_OK) {
-    PERFETTO_FATAL("Error initializing EXTRACT_ARG: %s", sqlite3_errmsg(db));
-  }
-}
-
-void CreateSourceGeqFunction(sqlite3* db) {
-  auto fn = [](sqlite3_context* ctx, int, sqlite3_value**) {
-    sqlite3_result_error(
-        ctx, "SOURCE_GEQ should not be called from the global scope", -1);
-  };
-  auto ret = sqlite3_create_function_v2(db, "SOURCE_GEQ", -1,
-                                        SQLITE_UTF8 | SQLITE_DETERMINISTIC,
-                                        nullptr, fn, nullptr, nullptr, nullptr);
-  if (ret != SQLITE_OK) {
-    PERFETTO_FATAL("Error initializing SOURCE_GEQ: %s", sqlite3_errmsg(db));
-  }
-}
-
-void CreateUnwrapMetricProtoFunction(sqlite3* db) {
-  auto ret = sqlite3_create_function_v2(
-      db, "UNWRAP_METRIC_PROTO", 2, SQLITE_UTF8 | SQLITE_DETERMINISTIC, nullptr,
-      &metrics::UnwrapMetricProto, nullptr, nullptr, nullptr);
-  if (ret != SQLITE_OK) {
-    PERFETTO_FATAL("Error initializing UNWRAP_METRIC_PROTO: %s",
-                   sqlite3_errmsg(db));
-  }
+  PERFETTO_FATAL("For GCC");
 }
 
 std::vector<std::string> SanitizeMetricMountPaths(
@@ -650,6 +637,17 @@
   return sanitized;
 }
 
+struct SourceGeq : public SqlFunction {
+  static base::Status Run(void*,
+                          size_t,
+                          sqlite3_value**,
+                          SqlValue&,
+                          Destructors&) {
+    return base::ErrStatus(
+        "SOURCE_GEQ should not be called from the global scope");
+  }
+};
+
 void SetupMetrics(TraceProcessor* tp,
                   sqlite3* db,
                   std::vector<metrics::SqlMetricFile>* sql_metrics,
@@ -672,19 +670,15 @@
     tp->RegisterMetric(file_to_sql.path, file_to_sql.sql);
   }
 
-  {
-    std::unique_ptr<metrics::RunMetricContext> ctx(
-        new metrics::RunMetricContext());
-    ctx->tp = tp;
-    ctx->metrics = sql_metrics;
-    auto ret = sqlite3_create_function_v2(
-        db, "RUN_METRIC", -1, SQLITE_UTF8, ctx.release(), metrics::RunMetric,
-        nullptr, nullptr,
-        [](void* ptr) { delete static_cast<metrics::RunMetricContext*>(ptr); });
-    if (ret)
-      PERFETTO_FATAL("Error initializing RUN_METRIC");
-  }
+  RegisterFunction<metrics::NullIfEmpty>(db, "NULL_IF_EMPTY", 1);
+  RegisterFunction<metrics::UnwrapMetricProto>(db, "UNWRAP_METRIC_PROTO", 2);
+  RegisterFunction<metrics::RunMetric>(
+      db, "RUN_METRIC", -1,
+      std::unique_ptr<metrics::RunMetric::Context>(
+          new metrics::RunMetric::Context{tp, sql_metrics}));
 
+  // TODO(lalitm): migrate this over to using RegisterFunction once aggregate
+  // functions are supported.
   {
     auto ret = sqlite3_create_function_v2(
         db, "RepeatedField", 1, SQLITE_UTF8, nullptr, nullptr,
@@ -692,14 +686,6 @@
     if (ret)
       PERFETTO_FATAL("Error initializing RepeatedField");
   }
-
-  {
-    auto ret = sqlite3_create_function_v2(db, "NULL_IF_EMPTY", 1, SQLITE_UTF8,
-                                          nullptr, metrics::NullIfEmpty,
-                                          nullptr, nullptr, nullptr);
-    if (ret)
-      PERFETTO_FATAL("Error initializing NULL_IF_EMPTY");
-  }
 }
 
 void EnsureSqliteInitialized() {
@@ -749,14 +735,19 @@
   CreateBuiltinViews(db);
   db_.reset(std::move(db));
 
-  CreateJsonExportFunction(context_.storage.get(), db);
-  CreateHashFunction(db);
-  CreateDemangledNameFunction(db);
-  CreateLastNonNullFunction(db);
-  CreateExtractArgFunction(context_.storage.get(), db);
-  CreateSourceGeqFunction(db);
-  CreateValueAtMaxTsFunction(db);
-  CreateUnwrapMetricProtoFunction(db);
+  // New style function registration.
+  RegisterFunction<Hash>(db, "HASH", -1);
+  RegisterFunction<Demangle>(db, "DEMANGLE", 1);
+  RegisterFunction<SourceGeq>(db, "SOURCE_GEQ", -1);
+  RegisterFunction<ExportJson>(db, "EXPORT_JSON", 1, context_.storage.get(),
+                               false);
+  RegisterFunction<ExtractArg>(db, "EXTRACT_ARG", 2, context_.storage.get());
+
+  // Old style function registration.
+  // TODO(lalitm): migrate this over to using RegisterFunction once aggregate
+  // functions are supported.
+  RegisterLastNonNullFunction(db);
+  RegisterValueAtMaxTsFunction(db);
 
   SetupMetrics(this, *db_, &sql_metrics_, cfg.skip_builtin_metric_paths);
 
@@ -1062,20 +1053,10 @@
     // into a function name of the form (TraceMetrics_SubMetric).
     auto fn_name = desc.full_name().substr(desc.package_name().size() + 1);
     std::replace(fn_name.begin(), fn_name.end(), '.', '_');
-
-    std::unique_ptr<metrics::BuildProtoContext> ctx(
-        new metrics::BuildProtoContext());
-    ctx->tp = this;
-    ctx->pool = &pool_;
-    ctx->desc = &desc;
-
-    auto ret = sqlite3_create_function_v2(
-        *db_, fn_name.c_str(), -1, SQLITE_UTF8, ctx.release(),
-        metrics::BuildProto, nullptr, nullptr, [](void* ptr) {
-          delete static_cast<metrics::BuildProtoContext*>(ptr);
-        });
-    if (ret != SQLITE_OK)
-      return util::ErrStatus("%s", sqlite3_errmsg(*db_));
+    RegisterFunction<metrics::BuildProto>(
+        db_.get(), fn_name.c_str(), -1,
+        std::unique_ptr<metrics::BuildProto::Context>(
+            new metrics::BuildProto::Context{this, &pool_, &desc}));
   }
   return util::OkStatus();
 }
diff --git a/src/tracing/internal/track_event_internal.cc b/src/tracing/internal/track_event_internal.cc
index 5e4a154..3717fb2 100644
--- a/src/tracing/internal/track_event_internal.cc
+++ b/src/tracing/internal/track_event_internal.cc
@@ -326,12 +326,11 @@
   }
 
   // Every thread should write a descriptor for its default track, because most
-  // trace points won't explicitly reference it.
+  // trace points won't explicitly reference it. We also write the process
+  // descriptor from every thread that writes trace events to ensure it gets
+  // emitted at least once.
   WriteTrackDescriptor(default_track, trace_writer);
-
-  // Additionally the main thread should dump the process descriptor.
-  if (perfetto::base::GetThreadId() == g_main_thread)
-    WriteTrackDescriptor(ProcessTrack::Current(), trace_writer);
+  WriteTrackDescriptor(ProcessTrack::Current(), trace_writer);
 }
 
 // static
diff --git a/src/tracing/test/api_integrationtest.cc b/src/tracing/test/api_integrationtest.cc
index d01ce33..26d8a03 100644
--- a/src/tracing/test/api_integrationtest.cc
+++ b/src/tracing/test/api_integrationtest.cc
@@ -1431,14 +1431,17 @@
   EXPECT_EQ("goodbye.exe", descs[2].name());
 
   // The child thread records only its own thread descriptor (twice, since it
-  // was mutated).
-  EXPECT_EQ(2u, thread_descs.size());
+  // was mutated). The child thread also emits another copy of the process
+  // descriptor.
+  EXPECT_EQ(3u, thread_descs.size());
   EXPECT_EQ("TestThread", thread_descs[0].name());
   EXPECT_NE(0, thread_descs[0].thread().pid());
   EXPECT_NE(0, thread_descs[0].thread().tid());
   EXPECT_EQ("TestThread", thread_descs[1].name());
   EXPECT_NE(0, thread_descs[1].thread().pid());
   EXPECT_NE(0, thread_descs[1].thread().tid());
+  EXPECT_NE(0, descs[2].process().pid());
+  EXPECT_EQ("goodbye.exe", descs[2].name());
 }
 
 TEST_P(PerfettoApiTest, CustomTrackDescriptor) {
diff --git a/test/.gitignore b/test/.gitignore
index efce6ad..3cb1d2b 100644
--- a/test/.gitignore
+++ b/test/.gitignore
@@ -1 +1,9 @@
+# We only keep the xxx.sha256 files in the git repo.
+# The corresponding binary files are downloaded by
+# `tools/test_data download`, run by install-build-deps.
 /data/*
+!/data/*.sha256
+
+!/data/ui-screenshots
+/data/ui-screenshots/*
+!/data/ui-screenshots/*.sha256
diff --git a/test/data/README.md.sha256 b/test/data/README.md.sha256
new file mode 100644
index 0000000..c740e86
--- /dev/null
+++ b/test/data/README.md.sha256
@@ -0,0 +1 @@
+12f0487d0c76689562b52e25e5c879c4ff2ab2b214b45556638644dfaef1750c
\ No newline at end of file
diff --git a/test/data/android_log.pb.sha256 b/test/data/android_log.pb.sha256
new file mode 100644
index 0000000..20db167
--- /dev/null
+++ b/test/data/android_log.pb.sha256
@@ -0,0 +1 @@
+841512535c2a95f024443455719adefdbed96073eb48a1ba950ed117d615e4d9
\ No newline at end of file
diff --git a/test/data/android_log_ring_buffer_mode.pb.sha256 b/test/data/android_log_ring_buffer_mode.pb.sha256
new file mode 100644
index 0000000..afa8fe4
--- /dev/null
+++ b/test/data/android_log_ring_buffer_mode.pb.sha256
@@ -0,0 +1 @@
+130113fd0efb6ca6a7de1c36cb7c27a5e06f2dea5cf1183f39594b6882d0a6f9
\ No newline at end of file
diff --git a/test/data/android_sched_and_ps.pb.sha256 b/test/data/android_sched_and_ps.pb.sha256
new file mode 100644
index 0000000..aa7ac2c
--- /dev/null
+++ b/test/data/android_sched_and_ps.pb.sha256
@@ -0,0 +1 @@
+0f0d482576280e159de5b57b2847cae2ca751ab9d523d77ef8d32fe422464714
\ No newline at end of file
diff --git a/test/data/atrace_b_193721088.atr.sha256 b/test/data/atrace_b_193721088.atr.sha256
new file mode 100644
index 0000000..9233bc4
--- /dev/null
+++ b/test/data/atrace_b_193721088.atr.sha256
@@ -0,0 +1 @@
+e875c7dc4e3923ad3752bcb97f141aa83a7e69aecc8dbcae1047853e087541b9
\ No newline at end of file
diff --git a/test/data/callstack_sampling.pftrace.sha256 b/test/data/callstack_sampling.pftrace.sha256
new file mode 100644
index 0000000..8649b41
--- /dev/null
+++ b/test/data/callstack_sampling.pftrace.sha256
@@ -0,0 +1 @@
+0ef35902380a75c52cd83afdb4165b545a5810bde16bad954872994ca21c5ae7
\ No newline at end of file
diff --git a/test/data/chrome_android_systrace.pftrace.sha256 b/test/data/chrome_android_systrace.pftrace.sha256
new file mode 100644
index 0000000..63f5b7d
--- /dev/null
+++ b/test/data/chrome_android_systrace.pftrace.sha256
@@ -0,0 +1 @@
+a96005e5ee9059b89347573f7a7ca53097244fb3532956e0a62897ad77be2ddf
\ No newline at end of file
diff --git a/test/data/chrome_memory_snapshot.pftrace.sha256 b/test/data/chrome_memory_snapshot.pftrace.sha256
new file mode 100644
index 0000000..c55016e
--- /dev/null
+++ b/test/data/chrome_memory_snapshot.pftrace.sha256
@@ -0,0 +1 @@
+4a06b393bf14147b25296797756a4185abf31510aca2db22ddb3c5dbd21123e4
\ No newline at end of file
diff --git a/test/data/chrome_rendering_desktop.pftrace.sha256 b/test/data/chrome_rendering_desktop.pftrace.sha256
new file mode 100644
index 0000000..28c7589
--- /dev/null
+++ b/test/data/chrome_rendering_desktop.pftrace.sha256
@@ -0,0 +1 @@
+f61971e42ea0ce0f6da71c87a0ab19da0e13deca0fa90c6bdc98782af01ae702
\ No newline at end of file
diff --git a/test/data/chrome_scroll_without_vsync.pftrace.sha256 b/test/data/chrome_scroll_without_vsync.pftrace.sha256
new file mode 100644
index 0000000..07deab4
--- /dev/null
+++ b/test/data/chrome_scroll_without_vsync.pftrace.sha256
@@ -0,0 +1 @@
+74890239a1042cb93a87b8b5b5d9942f821ed3cc0c1236e7734d45a550e3cde4
\ No newline at end of file
diff --git a/test/data/chrome_touch_gesture_scroll.pftrace.sha256 b/test/data/chrome_touch_gesture_scroll.pftrace.sha256
new file mode 100644
index 0000000..9d04d5d
--- /dev/null
+++ b/test/data/chrome_touch_gesture_scroll.pftrace.sha256
@@ -0,0 +1 @@
+2fe40090c41ebeb5dc6ce0bea5bc9aef4d7f4cf7fd625209641e0c5ea2210fb7
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14357.sha256 b/test/data/clusterfuzz_14357.sha256
new file mode 100644
index 0000000..c4506d0
--- /dev/null
+++ b/test/data/clusterfuzz_14357.sha256
@@ -0,0 +1 @@
+d9bbe8837573821ff3b505e68b0ed18c3193b8096f786e4b7632b160e44e91ca
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14730.sha256 b/test/data/clusterfuzz_14730.sha256
new file mode 100644
index 0000000..f91a3ae
--- /dev/null
+++ b/test/data/clusterfuzz_14730.sha256
@@ -0,0 +1 @@
+bdf674c136a88137664d22bf366ea3a15ef6c7925a535b1a97b04fdd8b325517
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14753.sha256 b/test/data/clusterfuzz_14753.sha256
new file mode 100644
index 0000000..eae4ae6
--- /dev/null
+++ b/test/data/clusterfuzz_14753.sha256
@@ -0,0 +1 @@
+e992a4a487aea71875a80fa0577aa2bb1324e14383a70022e81d606396a2fcee
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14762.sha256 b/test/data/clusterfuzz_14762.sha256
new file mode 100644
index 0000000..8af9a34
--- /dev/null
+++ b/test/data/clusterfuzz_14762.sha256
@@ -0,0 +1 @@
+cbeec86e06fefc086b9531a9bdb2b35533710627b9e49aeaa53651090fe11d06
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14767.sha256 b/test/data/clusterfuzz_14767.sha256
new file mode 100644
index 0000000..eb98fdb
--- /dev/null
+++ b/test/data/clusterfuzz_14767.sha256
@@ -0,0 +1 @@
+52aac563f69535cee168ae85804e2a67dc2bbcca52f5c3f8018c7f2e40f4867c
\ No newline at end of file
diff --git a/test/data/clusterfuzz_14799.sha256 b/test/data/clusterfuzz_14799.sha256
new file mode 100644
index 0000000..0021481
--- /dev/null
+++ b/test/data/clusterfuzz_14799.sha256
@@ -0,0 +1 @@
+edec5220388bee58ce9827c16676c0285c4b4779ecc1bd9957ca0791fd98315f
\ No newline at end of file
diff --git a/test/data/clusterfuzz_15252.sha256 b/test/data/clusterfuzz_15252.sha256
new file mode 100644
index 0000000..7ebfdfc
--- /dev/null
+++ b/test/data/clusterfuzz_15252.sha256
@@ -0,0 +1 @@
+3d6047483cddadbcbedfc828d42135ca25c51ec09bf55be4f7aae7844e24ba32
\ No newline at end of file
diff --git a/test/data/clusterfuzz_17805.sha256 b/test/data/clusterfuzz_17805.sha256
new file mode 100644
index 0000000..488e5a9
--- /dev/null
+++ b/test/data/clusterfuzz_17805.sha256
@@ -0,0 +1 @@
+f9eb862fec5ef13ecaf2c90c61bb7c88bc1184682f31383a34a2edcc416e5eaa
\ No newline at end of file
diff --git a/test/data/clusterfuzz_20215.sha256 b/test/data/clusterfuzz_20215.sha256
new file mode 100644
index 0000000..f9c8feb
--- /dev/null
+++ b/test/data/clusterfuzz_20215.sha256
@@ -0,0 +1 @@
+f5478862ec1a4e006a70bc15283ba9c2d769a5dc4c513ad43aab678b61a4b43a
\ No newline at end of file
diff --git a/test/data/clusterfuzz_20292.sha256 b/test/data/clusterfuzz_20292.sha256
new file mode 100644
index 0000000..ddfe70d
--- /dev/null
+++ b/test/data/clusterfuzz_20292.sha256
@@ -0,0 +1 @@
+cf2219b9a92fba90ba48e9ec1747e3f8b0d283afda7f56bc4a17fae002ff036c
\ No newline at end of file
diff --git a/test/data/clusterfuzz_21178.sha256 b/test/data/clusterfuzz_21178.sha256
new file mode 100644
index 0000000..11032cd
--- /dev/null
+++ b/test/data/clusterfuzz_21178.sha256
@@ -0,0 +1 @@
+c32f8fab41eeb4928091d9633f6e7b9cb42678ec1f0f1c27239f89e1ba5860ea
\ No newline at end of file
diff --git a/test/data/clusterfuzz_21890.sha256 b/test/data/clusterfuzz_21890.sha256
new file mode 100644
index 0000000..a148c5d
--- /dev/null
+++ b/test/data/clusterfuzz_21890.sha256
@@ -0,0 +1 @@
+469f2c24d560fda648d51f803eed28ef0b686fc11bd7801c808f98367dafd090
\ No newline at end of file
diff --git a/test/data/clusterfuzz_23053.sha256 b/test/data/clusterfuzz_23053.sha256
new file mode 100644
index 0000000..b896d78
--- /dev/null
+++ b/test/data/clusterfuzz_23053.sha256
@@ -0,0 +1 @@
+e424e3b4296939ad07465f937981011927869f6d050e7d5ba2af5dd783e2b48f
\ No newline at end of file
diff --git a/test/data/clusterfuzz_28338.sha256 b/test/data/clusterfuzz_28338.sha256
new file mode 100644
index 0000000..25aa866
--- /dev/null
+++ b/test/data/clusterfuzz_28338.sha256
@@ -0,0 +1 @@
+dee9f2a655a163892299479cb2cb9327b686d8ff2f672a6b9b4f44139e901152
\ No newline at end of file
diff --git a/test/data/clusterfuzz_28766.sha256 b/test/data/clusterfuzz_28766.sha256
new file mode 100644
index 0000000..0de9c27
--- /dev/null
+++ b/test/data/clusterfuzz_28766.sha256
@@ -0,0 +1 @@
+69f872a397a02e7f50e02b334a23e8346850db0a37c017c2076b7be7cc182c2d
\ No newline at end of file
diff --git a/test/data/compact_sched.pb.sha256 b/test/data/compact_sched.pb.sha256
new file mode 100644
index 0000000..5b0e007
--- /dev/null
+++ b/test/data/compact_sched.pb.sha256
@@ -0,0 +1 @@
+1fd2a81ae4354a38f3b433b516f083c9a8c378fe7043450ba7fee84d7fb2f5d8
\ No newline at end of file
diff --git a/test/data/compressed.pb.gz.sha256 b/test/data/compressed.pb.gz.sha256
new file mode 100644
index 0000000..99fbe22
--- /dev/null
+++ b/test/data/compressed.pb.gz.sha256
@@ -0,0 +1 @@
+20a762af59c421dea4211e371299016b47ce7e520f69fc981bd7d43dd9b23be1
\ No newline at end of file
diff --git a/test/data/compressed.pb.sha256 b/test/data/compressed.pb.sha256
new file mode 100644
index 0000000..8e71854
--- /dev/null
+++ b/test/data/compressed.pb.sha256
@@ -0,0 +1 @@
+33d25b9d884f18f04b1f9ded6aadd75e630d5605e5b157e9d1da8f15bea52d96
\ No newline at end of file
diff --git a/test/data/counters.json.sha256 b/test/data/counters.json.sha256
new file mode 100644
index 0000000..dc3aff3
--- /dev/null
+++ b/test/data/counters.json.sha256
@@ -0,0 +1 @@
+5c8a3e8bd3c5e0fb2eec5acd5e11ddb0ab129727f288c35fac3f545708904c35
\ No newline at end of file
diff --git a/test/data/cpu_counters.pb.sha256 b/test/data/cpu_counters.pb.sha256
new file mode 100644
index 0000000..4c5bb68
--- /dev/null
+++ b/test/data/cpu_counters.pb.sha256
@@ -0,0 +1 @@
+e566d656f369d988e42b9792ac14cc6e82ce5928066bbf26210ffe5bbda3384b
\ No newline at end of file
diff --git a/test/data/decimal_timestamp.json.sha256 b/test/data/decimal_timestamp.json.sha256
new file mode 100644
index 0000000..57ab15f
--- /dev/null
+++ b/test/data/decimal_timestamp.json.sha256
@@ -0,0 +1 @@
+698d7493fe7441e491ad5037587fc38269c878b6f76203fe63dda0e7daad54a4
\ No newline at end of file
diff --git a/test/data/display_time_unit.json.sha256 b/test/data/display_time_unit.json.sha256
new file mode 100644
index 0000000..c511e18
--- /dev/null
+++ b/test/data/display_time_unit.json.sha256
@@ -0,0 +1 @@
+208a66ae15b873b6a04ed3e0e67aeb8827e13b8d6e175514991df5d361d11162
\ No newline at end of file
diff --git a/test/data/example_android_trace_30s.pb.gz.sha256 b/test/data/example_android_trace_30s.pb.gz.sha256
new file mode 100644
index 0000000..bb57cf1
--- /dev/null
+++ b/test/data/example_android_trace_30s.pb.gz.sha256
@@ -0,0 +1 @@
+6716542d89bb2d1d9bd7c43d696111184a274bcecf0b3a468388b0d95b3c5336
\ No newline at end of file
diff --git a/test/data/example_android_trace_30s.pb.sha256 b/test/data/example_android_trace_30s.pb.sha256
new file mode 100644
index 0000000..83f6e45
--- /dev/null
+++ b/test/data/example_android_trace_30s.pb.sha256
@@ -0,0 +1 @@
+62656cc8ea2a624db95087850245d16453f0723f723ed2e329080e771bc1f82c
\ No newline at end of file
diff --git a/test/data/fuchsia_trace.fxt.sha256 b/test/data/fuchsia_trace.fxt.sha256
new file mode 100644
index 0000000..0b6b942
--- /dev/null
+++ b/test/data/fuchsia_trace.fxt.sha256
@@ -0,0 +1 @@
+b555e6d1346c8189f0d8b1427c1f2f716b2b15dfb9b7c6a2b978e8207ceaad7f
\ No newline at end of file
diff --git a/test/data/fuchsia_workstation.fxt.sha256 b/test/data/fuchsia_workstation.fxt.sha256
new file mode 100644
index 0000000..cf63c95
--- /dev/null
+++ b/test/data/fuchsia_workstation.fxt.sha256
@@ -0,0 +1 @@
+7fb7e1568313d02f495ddc8063fa9ef209a346f0322d105fac3462ffb60fe1fb
\ No newline at end of file
diff --git a/test/data/full_trace_filter.bytecode.sha256 b/test/data/full_trace_filter.bytecode.sha256
new file mode 100644
index 0000000..278daa6
--- /dev/null
+++ b/test/data/full_trace_filter.bytecode.sha256
@@ -0,0 +1 @@
+c394ad3e15cef817bc3f37ed02b3b3808a5bb8de5bcc8a3cfe83e455bfdad9d5
\ No newline at end of file
diff --git a/test/data/gpu_counters.pb.sha256 b/test/data/gpu_counters.pb.sha256
new file mode 100644
index 0000000..899fc60
--- /dev/null
+++ b/test/data/gpu_counters.pb.sha256
@@ -0,0 +1 @@
+33d59d659b9278fef11afa2ca3bd92c2cd41f46a1e8291babef9f2e256e08fee
\ No newline at end of file
diff --git a/test/data/gpu_trace.pb.sha256 b/test/data/gpu_trace.pb.sha256
new file mode 100644
index 0000000..f8f9037
--- /dev/null
+++ b/test/data/gpu_trace.pb.sha256
@@ -0,0 +1 @@
+19596f1c578f78ef05bfc8e045eed53ce141585703ce8aee0d2fdb1127871812
\ No newline at end of file
diff --git a/test/data/heapprofd_standalone_client_example-trace.sha256 b/test/data/heapprofd_standalone_client_example-trace.sha256
new file mode 100644
index 0000000..c97735a
--- /dev/null
+++ b/test/data/heapprofd_standalone_client_example-trace.sha256
@@ -0,0 +1 @@
+4f10e47ae1259d1a3aac6d6225a437e313882cf9d7b267c3e7d589d57a92d497
\ No newline at end of file
diff --git a/test/data/heapprofd_standalone_client_example.sha256 b/test/data/heapprofd_standalone_client_example.sha256
new file mode 100644
index 0000000..1aea0a1
--- /dev/null
+++ b/test/data/heapprofd_standalone_client_example.sha256
@@ -0,0 +1 @@
+7b95231beb9e7087d6e51564df95a5702635a7b4b880e1c88a4488e620eb8a94
\ No newline at end of file
diff --git a/test/data/instants.json.sha256 b/test/data/instants.json.sha256
new file mode 100644
index 0000000..1d627fb
--- /dev/null
+++ b/test/data/instants.json.sha256
@@ -0,0 +1 @@
+fa5d3c43e9ac589d7ffa5ece7306ab630d48324494b25fd7e2c7319d6cedd3b9
\ No newline at end of file
diff --git a/test/data/kallsyms.txt.sha256 b/test/data/kallsyms.txt.sha256
new file mode 100644
index 0000000..f1855a2
--- /dev/null
+++ b/test/data/kallsyms.txt.sha256
@@ -0,0 +1 @@
+79d35d0254881c0c18107a6985c1f1e95794287168a520dfb1934a03dfe58d3c
\ No newline at end of file
diff --git a/test/data/lmk_userspace.pb.sha256 b/test/data/lmk_userspace.pb.sha256
new file mode 100644
index 0000000..f83d652
--- /dev/null
+++ b/test/data/lmk_userspace.pb.sha256
@@ -0,0 +1 @@
+9b888ba2c4d724f9f034b7505d06e4266d1ebe8bfb4110b2b8eee44c127b24ed
\ No newline at end of file
diff --git a/test/data/log.proto.sha256 b/test/data/log.proto.sha256
new file mode 100644
index 0000000..afa8fe4
--- /dev/null
+++ b/test/data/log.proto.sha256
@@ -0,0 +1 @@
+130113fd0efb6ca6a7de1c36cb7c27a5e06f2dea5cf1183f39594b6882d0a6f9
\ No newline at end of file
diff --git a/test/data/memory_counters.pb.sha256 b/test/data/memory_counters.pb.sha256
new file mode 100644
index 0000000..52129d7
--- /dev/null
+++ b/test/data/memory_counters.pb.sha256
@@ -0,0 +1 @@
+29fa34f6bcd6ed9663dbe9be4615cc8eafc93c1d78b1e5f7353b56da6fcf82e2
\ No newline at end of file
diff --git a/test/data/mm_event.pb.sha256 b/test/data/mm_event.pb.sha256
new file mode 100644
index 0000000..db5cc4b
--- /dev/null
+++ b/test/data/mm_event.pb.sha256
@@ -0,0 +1 @@
+14c3405a0904552202f7325fe730fa1e99fa8b876ef08014984ad5b7ec449f1c
\ No newline at end of file
diff --git a/test/data/ninja_log.sha256 b/test/data/ninja_log.sha256
new file mode 100644
index 0000000..242ae64
--- /dev/null
+++ b/test/data/ninja_log.sha256
@@ -0,0 +1 @@
+2072bf6bad617bfaf8b453e3b9552db0e73c846122c897d3336b6ce41b4fc42b
\ No newline at end of file
diff --git a/test/data/perf_sample.pb.sha256 b/test/data/perf_sample.pb.sha256
new file mode 100644
index 0000000..170e58e
--- /dev/null
+++ b/test/data/perf_sample.pb.sha256
@@ -0,0 +1 @@
+3e4e31dcbe5e4d924b4e391593f81d065ffac3fc9faa224154d8c9723d6b1b54
\ No newline at end of file
diff --git a/test/data/perf_sample_sc.pb.sha256 b/test/data/perf_sample_sc.pb.sha256
new file mode 100644
index 0000000..5b574ac
--- /dev/null
+++ b/test/data/perf_sample_sc.pb.sha256
@@ -0,0 +1 @@
+aa12a4956ec01b2965209c1e4339d583e0797c8908b8fd9dacf239c027c4a769
\ No newline at end of file
diff --git a/test/data/power_rails.pb.sha256 b/test/data/power_rails.pb.sha256
new file mode 100644
index 0000000..fe4a7a0
--- /dev/null
+++ b/test/data/power_rails.pb.sha256
@@ -0,0 +1 @@
+a2eabaafee37c407129451f0074c98ee98537b5ccdc485ebcebc98e1848c071a
\ No newline at end of file
diff --git a/test/data/process_stats_poll.pb.sha256 b/test/data/process_stats_poll.pb.sha256
new file mode 100644
index 0000000..6586df9
--- /dev/null
+++ b/test/data/process_stats_poll.pb.sha256
@@ -0,0 +1 @@
+1e19595eb79493ff9b06ab3411adb082a507ddd55ed1e9dcee776d68b42242c5
\ No newline at end of file
diff --git a/test/data/sched_switch_compact.pb.sha256 b/test/data/sched_switch_compact.pb.sha256
new file mode 100644
index 0000000..ff665b5
--- /dev/null
+++ b/test/data/sched_switch_compact.pb.sha256
@@ -0,0 +1 @@
+ff9077ba16b689405b8a3678c7ce9129a91700a079a1d98f356cf0439e3b9931
\ No newline at end of file
diff --git a/test/data/sched_switch_original.pb.sha256 b/test/data/sched_switch_original.pb.sha256
new file mode 100644
index 0000000..fea71e5
--- /dev/null
+++ b/test/data/sched_switch_original.pb.sha256
@@ -0,0 +1 @@
+530a58c740e3ecd8af756c2034c81664a2022981cc944d8b0224181fad2cb19a
\ No newline at end of file
diff --git a/test/data/sfgate.json.sha256 b/test/data/sfgate.json.sha256
new file mode 100644
index 0000000..cdb4c4e
--- /dev/null
+++ b/test/data/sfgate.json.sha256
@@ -0,0 +1 @@
+56fddbeea278ce21eb314206797a81ae6758a8fe1309b5064f8c64f33873410c
\ No newline at end of file
diff --git a/test/data/synth_1.pb.sha256 b/test/data/synth_1.pb.sha256
new file mode 100644
index 0000000..a722f49
--- /dev/null
+++ b/test/data/synth_1.pb.sha256
@@ -0,0 +1 @@
+60f6625513a9d37260b08dc655e76b88333ff36012271b2565ddc0b8ec27596b
\ No newline at end of file
diff --git a/test/data/sys.pb.sha256 b/test/data/sys.pb.sha256
new file mode 100644
index 0000000..7bf8df4
--- /dev/null
+++ b/test/data/sys.pb.sha256
@@ -0,0 +1 @@
+fe48a7e58c3a06b21079e93da0526f02de8a63b9d08e3468249eebde6967b17f
\ No newline at end of file
diff --git a/test/data/system-server-heap-graph-new.pftrace.sha256 b/test/data/system-server-heap-graph-new.pftrace.sha256
new file mode 100644
index 0000000..17717a8
--- /dev/null
+++ b/test/data/system-server-heap-graph-new.pftrace.sha256
@@ -0,0 +1 @@
+71dd85d790034895c429272d1dc7d429f05ee6b97ce7a02c1ed06abd3a6ff849
\ No newline at end of file
diff --git a/test/data/system-server-heap-graph.pftrace.sha256 b/test/data/system-server-heap-graph.pftrace.sha256
new file mode 100644
index 0000000..a20f454
--- /dev/null
+++ b/test/data/system-server-heap-graph.pftrace.sha256
@@ -0,0 +1 @@
+b887171e5c1b147c832febf7424716fb8462bfd13ff240114e425fd97eb3a6e2
\ No newline at end of file
diff --git a/test/data/system-server-native-profile.sha256 b/test/data/system-server-native-profile.sha256
new file mode 100644
index 0000000..f00c691
--- /dev/null
+++ b/test/data/system-server-native-profile.sha256
@@ -0,0 +1 @@
+d742984b3d3f556c3d00d82ee9c53de2cd9809a5a5ec342ff83c3b0faf053afc
\ No newline at end of file
diff --git a/test/data/systrace.html.sha256 b/test/data/systrace.html.sha256
new file mode 100644
index 0000000..f478e2a
--- /dev/null
+++ b/test/data/systrace.html.sha256
@@ -0,0 +1 @@
+a87a26703e28981ad62554005a081794cfb68cc35f14149e99b4f1c41964c9c5
\ No newline at end of file
diff --git a/test/data/trace_with_descriptor.pftrace.sha256 b/test/data/trace_with_descriptor.pftrace.sha256
new file mode 100644
index 0000000..a475358
--- /dev/null
+++ b/test/data/trace_with_descriptor.pftrace.sha256
@@ -0,0 +1 @@
+ce3e83427ce4c55dc348daaf6553b1750bd8f874da54337458ed4bffb6a78b46
\ No newline at end of file
diff --git a/test/data/trace_with_uuid.pftrace.sha256 b/test/data/trace_with_uuid.pftrace.sha256
new file mode 100644
index 0000000..b0bce34
--- /dev/null
+++ b/test/data/trace_with_uuid.pftrace.sha256
@@ -0,0 +1 @@
+825f5e43c09e067e68b940094044087cd25c32db1a69d30f781f912a0bf115d2
\ No newline at end of file
diff --git a/test/data/track_event_typed_args.pb.sha256 b/test/data/track_event_typed_args.pb.sha256
new file mode 100644
index 0000000..fdc1075
--- /dev/null
+++ b/test/data/track_event_typed_args.pb.sha256
@@ -0,0 +1 @@
+cd5c861185ff6cb14d080f07c2d92b137c985df6aa515850e8acf761b044a84f
\ No newline at end of file
diff --git a/test/data/trailing_empty.systrace.sha256 b/test/data/trailing_empty.systrace.sha256
new file mode 100644
index 0000000..0025d9c
--- /dev/null
+++ b/test/data/trailing_empty.systrace.sha256
@@ -0,0 +1 @@
+8921b2433ecfa8c95f2e43074a5b9318b58bfd88a53fb407baa8952a60c6b0c4
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-android_trace_30s_expand_camera.png.sha256 b/test/data/ui-screenshots/ui-android_trace_30s_expand_camera.png.sha256
new file mode 100644
index 0000000..5ddebaa
--- /dev/null
+++ b/test/data/ui-screenshots/ui-android_trace_30s_expand_camera.png.sha256
@@ -0,0 +1 @@
+f7ff6334f50962bde39b2000a2fc331ac48341782916c62969d026c81570814e
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-android_trace_30s_load.png.sha256 b/test/data/ui-screenshots/ui-android_trace_30s_load.png.sha256
new file mode 100644
index 0000000..3db4e21
--- /dev/null
+++ b/test/data/ui-screenshots/ui-android_trace_30s_load.png.sha256
@@ -0,0 +1 @@
+615f7f108325d458f288e4669c6aef2ae6a9f2e38e380119a61e3dfaca46c185
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-android_trace_30s_search.png.sha256 b/test/data/ui-screenshots/ui-android_trace_30s_search.png.sha256
new file mode 100644
index 0000000..fae0e8f
--- /dev/null
+++ b/test/data/ui-screenshots/ui-android_trace_30s_search.png.sha256
@@ -0,0 +1 @@
+540bf6e6be871ea65a77e6823e5a2f1c33b190fa86220749a487a3e96e08c21a
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-chrome_rendering_desktop_expand_browser_proc.png.sha256 b/test/data/ui-screenshots/ui-chrome_rendering_desktop_expand_browser_proc.png.sha256
new file mode 100644
index 0000000..0b4c919
--- /dev/null
+++ b/test/data/ui-screenshots/ui-chrome_rendering_desktop_expand_browser_proc.png.sha256
@@ -0,0 +1 @@
+af7f01fbbcebcb02b281fa9b943c40c171a040ef83232cdde9786cde7d574642
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-chrome_rendering_desktop_load.png.sha256 b/test/data/ui-screenshots/ui-chrome_rendering_desktop_load.png.sha256
new file mode 100644
index 0000000..f5c3bdd
--- /dev/null
+++ b/test/data/ui-screenshots/ui-chrome_rendering_desktop_load.png.sha256
@@ -0,0 +1 @@
+2fe10bab76852ab7a9c9ac5eb7013a3bfb7e1341ff64baf9dbf9ce6bde10b940
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-chrome_rendering_desktop_select_slice_with_flows.png.sha256 b/test/data/ui-screenshots/ui-chrome_rendering_desktop_select_slice_with_flows.png.sha256
new file mode 100644
index 0000000..76ca381
--- /dev/null
+++ b/test/data/ui-screenshots/ui-chrome_rendering_desktop_select_slice_with_flows.png.sha256
@@ -0,0 +1 @@
+32717778b5a2999adb1e292dc793b0efbd490d3d3aeeb81364470e717f94c6a0
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_navigate_navigate_back_and_forward.png.sha256 b/test/data/ui-screenshots/ui-routing_navigate_navigate_back_and_forward.png.sha256
new file mode 100644
index 0000000..58243f3
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_navigate_navigate_back_and_forward.png.sha256
@@ -0,0 +1 @@
+29cdb8b1a7fb2df704fa16fb9d342aef9f61d17dea58c5c2a57ccc0bda3b874f
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_navigate_open_trace_from_url.png.sha256 b/test/data/ui-screenshots/ui-routing_navigate_open_trace_from_url.png.sha256
new file mode 100644
index 0000000..4efc85f
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_navigate_open_trace_from_url.png.sha256
@@ -0,0 +1 @@
+99911b5f7e6db8081ecbca7d7e2c47bf57b8a54434c639a17830dd4d84b67009
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_invalid_trace_from_blank_page.png.sha256 b/test/data/ui-screenshots/ui-routing_open_invalid_trace_from_blank_page.png.sha256
new file mode 100644
index 0000000..f565bae
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_invalid_trace_from_blank_page.png.sha256
@@ -0,0 +1 @@
+80e9b51139060f6c3a0fe7a6d35d91d2fdd9b600cf6c8e667236b8aa8498cbfe
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256 b/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256
new file mode 100644
index 0000000..b57e638
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_trace_and_go_back_to_landing_page.png.sha256
@@ -0,0 +1 @@
+8ac1dbfa90bdf508de0ffda2d37c0a55200dc579fbb8515dc38dd01711735fff
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_access_subpage_then_go_back.png.sha256 b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_access_subpage_then_go_back.png.sha256
new file mode 100644
index 0000000..7d9ae47
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_access_subpage_then_go_back.png.sha256
@@ -0,0 +1 @@
+1392ef931864190ea4f65d2e8c62834fc921b70ad1a196ebe999d784963c9976
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_first_trace_from_url.png.sha256 b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_first_trace_from_url.png.sha256
new file mode 100644
index 0000000..4efc85f
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_first_trace_from_url.png.sha256
@@ -0,0 +1 @@
+99911b5f7e6db8081ecbca7d7e2c47bf57b8a54434c639a17830dd4d84b67009
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_second_trace_from_url.png.sha256 b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_second_trace_from_url.png.sha256
new file mode 100644
index 0000000..6a6b38f
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_second_trace_from_url.png.sha256
@@ -0,0 +1 @@
+0f8d2c39e3db7f5ac6510cdce96c5ff9d5dd95ab0aac64a25672a0b8447334ea
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_trace_from_url.png.sha256 b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_trace_from_url.png.sha256
new file mode 100644
index 0000000..c2bdfba
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_open_two_traces_then_go_back_open_trace_from_url.png.sha256
@@ -0,0 +1 @@
+1394a4ff204fada30758050221f4c201857e201178131e0f7741a62492d48814
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_back_to_first_trace.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_back_to_first_trace.png.sha256
new file mode 100644
index 0000000..1e76cba
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_back_to_first_trace.png.sha256
@@ -0,0 +1 @@
+4c3365fd082798e6e8e00a845b7ec9f49eac23d90f8f9e61feacce3fdfe4d8ca
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_to_page_with_no_trace.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_to_page_with_no_trace.png.sha256
new file mode 100644
index 0000000..5f045d6
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_go_to_page_with_no_trace.png.sha256
@@ -0,0 +1 @@
+5262db3e72ea4e0dfd05326b5244b712293d99d7fdbc0b7839d595dcccd748d5
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_invalid_trace.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_invalid_trace.png.sha256
new file mode 100644
index 0000000..9e91e29
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_invalid_trace.png.sha256
@@ -0,0 +1 @@
+cf77763da2a3cc43b56e08debd14a09b1e3fe13ad9a5dc25b599d81bc0d5e236
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_second_trace.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_second_trace.png.sha256
new file mode 100644
index 0000000..a461053
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_second_trace.png.sha256
@@ -0,0 +1 @@
+ffb8037bb4c40d392cd0d99220fc1330ef53ca0c912dbf104dcb5c4900dc9e05
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_trace_.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_trace_.png.sha256
new file mode 100644
index 0000000..7d9ae47
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_open_trace_.png.sha256
@@ -0,0 +1 @@
+1392ef931864190ea4f65d2e8c62834fc921b70ad1a196ebe999d784963c9976
\ No newline at end of file
diff --git a/test/data/ui-screenshots/ui-routing_start_from_no_trace_refresh.png.sha256 b/test/data/ui-screenshots/ui-routing_start_from_no_trace_refresh.png.sha256
new file mode 100644
index 0000000..7d9ae47
--- /dev/null
+++ b/test/data/ui-screenshots/ui-routing_start_from_no_trace_refresh.png.sha256
@@ -0,0 +1 @@
+1392ef931864190ea4f65d2e8c62834fc921b70ad1a196ebe999d784963c9976
\ No newline at end of file
diff --git a/test/data/unsorted_trace.json.sha256 b/test/data/unsorted_trace.json.sha256
new file mode 100644
index 0000000..14c96d1
--- /dev/null
+++ b/test/data/unsorted_trace.json.sha256
@@ -0,0 +1 @@
+360b23ca6419ef1ce27de35b9e46fc4202db30f5093596e2dca841e547d13ccd
\ No newline at end of file
diff --git a/tools/add_test_data b/tools/add_test_data
deleted file mode 100755
index a3f5b6b..0000000
--- a/tools/add_test_data
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Adds a file or directory to /test/data/, uploads a new test_data.zip to GCS
-# and updates the sha1 in tools/install-build-deps.
-
-set -e
-
-ROOT_DIR="$(dirname $(cd -P ${BASH_SOURCE[0]%/*}; pwd))"
-
-echo ""
-echo "Downloading latest copy of test data"
-echo ""
-LATEST_ZIP="$(cat tools/install-build-deps  | grep -o 'https://.*/perfetto/test-data-.*.zip')"
-curl -o /tmp/latest-test-data.zip $LATEST_ZIP
-
-echo ""
-echo "Extracting test data to temp folder"
-echo ""
-rm -rf /tmp/latest-test-data 2>/dev/null
-unzip /tmp/latest-test-data.zip -d /tmp/latest-test-data
-
-echo ""
-echo "Copying $1 to temp folder"
-echo ""
-
-set -x
-if [ -d "$1" ]; then
-  DIR_NAME="$(basename $1)"
-  rm -rf "/tmp/latest-test-data/$DIR_NAME"
-  cp -r "$1" "/tmp/latest-test-data/$DIR_NAME"
-else
-  cp "$1" /tmp/latest-test-data
-fi
-set +x
-
-echo ""
-echo "Zipping file back up"
-echo ""
-NEW_TEST_DATA="test-data-$(date +%Y%m%d-%H%M%S).zip"
-CWD="$(pwd)"
-cd /tmp/latest-test-data
-zip -r /tmp/$NEW_TEST_DATA *
-cd $CWD
-
-echo ""
-echo "Uploading file to Google Cloud"
-echo ""
-gsutil cp /tmp/$NEW_TEST_DATA gs://perfetto/$NEW_TEST_DATA
-
-echo ""
-echo "Setting file to world readable"
-echo ""
-gsutil acl ch -u AllUsers:R gs://perfetto/$NEW_TEST_DATA
-
-echo ""
-echo "SHA-256 of file $NEW_TEST_DATA is"
-NEW_SHA=$(shasum -a 256 /tmp/$NEW_TEST_DATA | cut -c1-64)
-echo $NEW_SHA
-
-echo ""
-echo "Cleaning up leftover files"
-echo ""
-rm -r /tmp/latest-test-data
-rm /tmp/latest-test-data.zip
-rm /tmp/$NEW_TEST_DATA
-
-echo ""
-echo "Updating tools/install-build-deps"
-echo ""
-
-OLD_SHA=$(cat tools/install-build-deps | grep '/test-data-.*.zip' -A1 | tail -n1 | egrep -o '[a-f0-9]+')
-
-# Cannot easily use sed -i, it has different syntax on Linux vs Mac.
-cat tools/install-build-deps \
-  | sed -e "s|/test-data-.*.zip|/$NEW_TEST_DATA|g" \
-  | sed -e "s|$OLD_SHA|$NEW_SHA|g" \
-  > tools/install-build-deps.tmp
-
-mv -f tools/install-build-deps.tmp tools/install-build-deps
-chmod 755 tools/install-build-deps
-
-echo "All done!"
diff --git a/tools/add_tp_diff_test.py b/tools/add_tp_diff_test.py
index ce3ece7..16836b3 100755
--- a/tools/add_tp_diff_test.py
+++ b/tools/add_tp_diff_test.py
@@ -82,8 +82,8 @@
   print()
   trace_file = ''
   if trace_type == 'proto':
-    print('Proto traces should be added to the test-data zip '
-          'using the tools/add_test_data')
+    print('Proto traces should be added to the test_data GCS bucket '
+          'using tools/test_data upload')
     stdout_write('Provide the name of the trace (including any '
                  'extension) relative to test/data: ')
 
diff --git a/tools/batch_trace_processor/main.py b/tools/batch_trace_processor/main.py
new file mode 100644
index 0000000..36a4d53
--- /dev/null
+++ b/tools/batch_trace_processor/main.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Given a trace file, gives the self-time of userspace slices broken
+down by process, thread and thread state.
+"""
+
+import argparse
+import cmd
+import logging
+import numpy as np
+import pandas as pd
+import plotille
+
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+from perfetto.trace_processor import TraceProcessor
+from perfetto.trace_processor.api import TraceProcessorException
+
+
+@dataclass
+class TpArg:
+  shell_path: str
+  verbose: bool
+  file: str
+
+
+def create_tp_args(args, files):
+  return [TpArg(args.shell_path, args.verbose, f) for f in files]
+
+
+def create_tp(arg):
+  return TraceProcessor(
+      file_path=arg.file, bin_path=arg.shell_path, verbose=arg.verbose)
+
+
+def close_tp(tp):
+  tp.close()
+
+
+def query_single_result(tp, query):
+  df = tp.query(query).as_pandas_dataframe()
+  if len(df.index) != 1:
+    raise TraceProcessorException("Query should only return a single row")
+
+  if len(df.columns) != 1:
+    raise TraceProcessorException("Query should only return a single column")
+
+  return df.iloc[0, 0]
+
+
+def query_file_and_return_last(tp, queries_str):
+  queries = [q.strip() for q in queries_str.split(";\n")]
+  return [tp.query(q).as_pandas_dataframe() for q in queries if q][-1]
+
+
+def prefix_path_column(path, df):
+  df['trace_file_path'] = path
+  return df
+
+
+class TpBatchShell(cmd.Cmd):
+
+  def __init__(self, executor, files, tps):
+    super().__init__()
+    self.executor = executor
+    self.files = files
+    self.tps = tps
+
+  def do_histogram(self, arg):
+    try:
+      data = list(
+          self.executor.map(lambda tp: query_single_result(tp, arg), self.tps))
+      print(plotille.histogram(data))
+      self.print_percentiles(data)
+    except TraceProcessorException as ex:
+      logging.error("Query failed: {}".format(ex))
+
+  def do_vhistogram(self, arg):
+    try:
+      data = list(
+          self.executor.map(lambda tp: query_single_result(tp, arg), self.tps))
+      print(plotille.hist(data))
+      self.print_percentiles(data)
+    except TraceProcessorException as ex:
+      logging.error("Query failed: {}".format(ex))
+
+  def do_count(self, arg):
+    try:
+      data = list(
+          self.executor.map(lambda tp: query_single_result(tp, arg), self.tps))
+      counts = dict()
+      for i in data:
+        counts[i] = counts.get(i, 0) + 1
+      print(counts)
+    except TraceProcessorException as ex:
+      logging.error("Query failed: {}".format(ex))
+
+  def do_close(self, _):
+    return True
+
+  def do_quit(self, _):
+    return True
+
+  def do_EOF(self, _):
+    print("")
+    return True
+
+  def print_percentiles(self, data):
+    percentiles = [25, 50, 75, 95, 99, 99.9]
+    nearest = np.percentile(data, percentiles, interpolation='nearest')
+    logging.info("Representative traces for percentiles")
+    for i, near in enumerate(nearest):
+      print("{}%: {}".format(percentiles[i], self.files[data.index(near)]))
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--shell-path', default=None)
+  parser.add_argument('--verbose', action='store_true', default=False)
+  parser.add_argument('--file-list', default=None)
+  parser.add_argument('--query-file', default=None)
+  parser.add_argument('--interactive', default=None)
+  parser.add_argument('files', nargs='*')
+  args = parser.parse_args()
+
+  logging.basicConfig(level=logging.DEBUG)
+
+  files = args.files
+  if args.file_list:
+    with open(args.file_list, 'r') as f:
+      files += f.read().splitlines()
+
+  if not files:
+    logging.info("At least one file must be specified in files or file list")
+
+  executor = ThreadPoolExecutor()
+
+  logging.info('Loading traces...')
+  tps = [tp for tp in executor.map(create_tp, create_tp_args(args, files))]
+
+  if args.query_file:
+    logging.info('Running query file...')
+
+    with open(args.query_file, 'r') as f:
+      query = f.read()
+
+    out = list(
+        executor.map(lambda tp: query_file_and_return_last(tp, query), tps))
+    res = pd.concat(
+        [prefix_path_column(path, df) for (path, df) in zip(files, out)])
+    print(res.to_csv(index=False))
+
+  if args.interactive or not args.query_file:
+    try:
+      TpBatchShell(executor, files, tps).cmdloop()
+    except KeyboardInterrupt:
+      pass
+
+  logging.info("Closing; please wait...")
+  executor.map(close_tp, tps)
+  executor.shutdown()
+
+
+if __name__ == '__main__':
+  exit(main())
diff --git a/tools/gen_merged_protos b/tools/gen_merged_protos
index 76040b4..a7fc12d 100755
--- a/tools/gen_merged_protos
+++ b/tools/gen_merged_protos
@@ -52,6 +52,8 @@
 syntax = "proto2";
 
 package perfetto.protos;
+
+option go_package = "github.com/google/perfetto/perfetto_proto";
 '''
 
 
diff --git a/tools/install-build-deps b/tools/install-build-deps
index a73caf8..c53ee22 100755
--- a/tools/install-build-deps
+++ b/tools/install-build-deps
@@ -143,7 +143,8 @@
         'buildtools/protobuf',
         'https://chromium.googlesource.com/external/github.com/google/protobuf.git',
         '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',  # refs/tags/v3.9.0
-        'all', 'all'),
+        'all',
+        'all'),
 
     # libc++, libc++abi and libunwind for Linux where we need to rebuild the C++
     # lib from sources. Keep the SHA1s in sync with Chrome's src/buildtools/DEPS.
@@ -193,7 +194,8 @@
         'buildtools/sqlite_src',
         'https://chromium.googlesource.com/external/github.com/sqlite/sqlite.git',
         'ee3686eb50c0e3dbb087c9a0976f7e37e1b014ae',  # refs/tags/version-3.32.3.
-        'all', 'all'),
+        'all',
+        'all'),
 
     # JsonCpp for legacy json import. Used only by the trace processor in
     # standalone builds.
@@ -201,7 +203,8 @@
         'buildtools/jsoncpp',
         'https://chromium.googlesource.com/external/github.com/open-source-parsers/jsoncpp.git',
         '6aba23f4a8628d599a9ef7fa4811c4ff6e4070e2',  # refs/tags/1.9.3.
-        'all', 'all'),
+        'all',
+        'all'),
 
     # These dependencies are for libunwindstack, which is used by src/profiling.
     Dependency('buildtools/android-core',
@@ -210,7 +213,7 @@
     Dependency(
         'buildtools/android-unwinding',
         'https://android.googlesource.com/platform/system/unwinding.git',
-        '88367cfde862f4bdd1da64d691b8a3854026f439', 'all', 'all'),
+        '734cc2f1d45d5e0269654cb5b8fdecfc93dd2ad3', 'all', 'all'),
     Dependency('buildtools/android-logging',
                'https://android.googlesource.com/platform/system/logging.git',
                '7b36b566c9113fc703d68f76e8f40c0c2432481c', 'all', 'all'),
@@ -231,23 +234,17 @@
                'https://android.googlesource.com/platform/bionic.git',
                '332065d57e734b65f56474d136d22d767e36cbcd', 'all', 'all'),
 
-    # Example traces for regression tests.
-    Dependency(
-        'test/data.zip',
-        'https://storage.googleapis.com/perfetto/test-data-20211104-140001.zip',
-        '99f5761cd14d0ae4dc0b9d7b6e8f8752d3ba0d7d37d67121eef40f089f95dadb',
-        'all', 'all',
-    ),
-
     # Linenoise, used only by trace_processor in standalone builds.
     Dependency('buildtools/linenoise',
                'https://fuchsia.googlesource.com/third_party/linenoise.git',
                'c894b9e59f02203dbe4e2be657572cf88c4230c3', 'all', 'all'),
 
     # Bloaty, used to investigate binary size
-    Dependency('buildtools/bloaty.zip',
-               'https://storage.googleapis.com/perfetto/bloaty-1.1-b3b829de35babc2fe831b9488ad2e50bca939412-mac.zip',
-               '2d301bd72a20e3f42888c9274ceb4dca76c103608053572322412c2c65ab8cb8', 'darwin', 'x64'),
+    Dependency(
+        'buildtools/bloaty.zip',
+        'https://storage.googleapis.com/perfetto/bloaty-1.1-b3b829de35babc2fe831b9488ad2e50bca939412-mac.zip',
+        '2d301bd72a20e3f42888c9274ceb4dca76c103608053572322412c2c65ab8cb8',
+        'darwin', 'x64'),
 ]
 
 # Dependencies required to build Android code.
@@ -347,13 +344,15 @@
         'linux', 'all'),
 ]
 
-ALL_DEPS = (BUILD_DEPS_HOST + BUILD_DEPS_ANDROID +
-    BUILD_DEPS_LINUX_CROSS_SYSROOTS + TEST_DEPS_ANDROID + UI_DEPS)
+ALL_DEPS = (
+    BUILD_DEPS_HOST + BUILD_DEPS_ANDROID + BUILD_DEPS_LINUX_CROSS_SYSROOTS +
+    TEST_DEPS_ANDROID + UI_DEPS)
 
 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 UI_DIR = os.path.join(ROOT_DIR, 'ui')
 TOOLS_DIR = os.path.join(ROOT_DIR, 'tools')
 NODE_MODULES_STATUS_FILE = os.path.join(UI_DIR, 'node_modules', '.last_install')
+TEST_DATA_SCRIPT = os.path.join(TOOLS_DIR, 'test_data')
 
 
 def DownloadURL(url, out_file):
@@ -421,8 +420,7 @@
       not os.path.abspath(path).startswith(test_path)):
     # Safety check to prevent that some merge confilct ends up doing some
     # rm -rf / or similar.
-    logging.fatal(
-      'Cannot remove %s: outside of buildtools and test/data', path)
+    logging.fatal('Cannot remove %s: outside of buildtools and test/data', path)
     sys.exit(1)
   logging.info('Removing %s' % path)
   shutil.rmtree(path, onerror=del_read_only_for_windows)
@@ -481,8 +479,8 @@
   for dep in ALL_DEPS:
     if dep.source_url.endswith('.git'):
       continue
-    logging.info('Downloading %s for %s-%s', dep.source_url,
-        dep.target_os, dep.target_arch)
+    logging.info('Downloading %s for %s-%s', dep.source_url, dep.target_os,
+                 dep.target_arch)
     with tempfile.NamedTemporaryFile(delete=False) as f:
       f.close()
       DownloadURL(dep.source_url, f.name)
@@ -495,17 +493,23 @@
 
 def Main():
   parser = argparse.ArgumentParser()
-  parser.add_argument('--android', action='store_true',
+  parser.add_argument(
+      '--android',
+      action='store_true',
       help='NDK and emulator images target_os="android"')
-  parser.add_argument('--linux-arm', action='store_true',
+  parser.add_argument(
+      '--linux-arm',
+      action='store_true',
       help='Debian sysroots for target_os="linux" target_cpu="arm|arm64"')
-  parser.add_argument('--ui', action='store_true',
+  parser.add_argument(
+      '--ui',
+      action='store_true',
       help='Node and NPM packages to Build the Web-based UI via ./ui/build')
   parser.add_argument('--check-only')
   parser.add_argument('--filter', default='')
   parser.add_argument('--verify', help='Check all URLs', action='store_true')
-  parser.add_argument('--no-toolchain', help='Do not download toolchain',
-                      action='store_true')
+  parser.add_argument(
+      '--no-toolchain', help='Do not download toolchain', action='store_true')
   args = parser.parse_args()
   if args.verify:
     CheckHashes()
@@ -608,16 +612,26 @@
     else:
       InstallNodeModules(force_clean=nodejs_updated)
 
+  cur_python_interpreter = sys.executable
+  test_data_synced = 0 == subprocess.call(
+      [cur_python_interpreter, TEST_DATA_SCRIPT, 'status', '--quiet'])
   if args.check_only:
-    if not deps_updated:
+    if not deps_updated and test_data_synced:
       with open(args.check_only, 'w') as f:
         f.write('OK')  # The content is irrelevant, just keep GN happy.
       return 0
-    argz = ' '.join([x for x in sys.argv[1:] if not x.startswith('--check-only')])
+    argz = ' '.join(
+        [x for x in sys.argv[1:] if not x.startswith('--check-only')])
     print('\033[91mBuild deps are stale. ' +
           'Please run tools/install-build-deps %s\033[0m' % argz)
     return 1
 
+  if not test_data_synced:
+    cmd = [cur_python_interpreter, TEST_DATA_SCRIPT, 'download']
+    if not sys.stdout.isatty():
+      cmd += ['--verbose']  # For CI bots
+    subprocess.check_call(cmd)
+
   if deps_updated:
     # Stale binary files may be compiled against old sysroot headers that aren't
     # tracked by gn.
diff --git a/tools/test_data b/tools/test_data
new file mode 100755
index 0000000..4aa2139
--- /dev/null
+++ b/tools/test_data
@@ -0,0 +1,252 @@
+#!/usr/bin/env python3
+"""
+Script to synchronize (local>remote and vice versa) test data files from/to GCS.
+
+//test/data files are not checked in the codebase because they are large binary
+files and change frequently. Instead we check in only xxx.sha256 files, which
+contain the SHA-256 of the actual binary file, and sync them from a GCS bucket.
+
+Files in the GCS bucket are content-indexed as gs://bucket/file_name-a1b2c3f4.
+
+Usage:
+./test_data status     # Prints the status of new & modified files.
+./test_data download   # To sync remote>local (used by install-build-deps).
+./test_data upload     # To upload newly created and modified files.
+"""
+
+import argparse
+import logging
+import os
+import sys
+import hashlib
+import subprocess
+
+from multiprocessing.pool import ThreadPool
+from collections import namedtuple, defaultdict
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+BUCKET = 'gs://perfetto/test_data'
+SUFFIX = '.sha256'
+
+FS_MATCH = 'matches'
+FS_NEW_FILE = 'needs upload'
+FS_MODIFIED = 'modified'
+FS_MISSING = 'needs download'
+
+FileStat = namedtuple('FileStat',
+                      ['path', 'status', 'actual_digest', 'expected_digest'])
+args = None
+
+
+def relpath(path):
+  return os.path.relpath(path, ROOT_DIR)
+
+
+def download(url, out_file):
+  subprocess.check_call(['curl', '-L', '-s', '-o', out_file, url])
+
+
+def list_files(path, scan_new_files=False):
+  """ List files recursively in path.
+
+  If scan_new_files=False, returns only files with a matching xxx.sha256 tracker.
+  If scan_new_files=True returns all files including untracked ones.
+  """
+  seen = set()
+  for root, _, files in os.walk(path):
+    for fname in files:
+      if fname.endswith('.swp'):
+        continue  # Temporary files left around if CTRL-C-ing while downloading.
+      fpath = os.path.join(root, fname)
+      if not os.path.isfile(fpath) or fname.startswith('.'):
+        continue
+      if fpath.endswith(SUFFIX):
+        fpath = fpath[:-len(SUFFIX)]
+      elif not scan_new_files:
+        continue
+      if fpath not in seen:
+        seen.add(fpath)
+        yield fpath
+
+
+def hash_file(fpath):
+  hasher = hashlib.sha256()
+  with open(fpath, 'rb') as f:
+    for chunk in iter(lambda: f.read(32768), b''):
+      hasher.update(chunk)
+  return hasher.hexdigest()
+
+
+def map_concurrently(fn, files):
+  done = 0
+  for fs in ThreadPool(args.jobs).imap_unordered(fn, files):
+    assert (isinstance(fs, FileStat))
+    done += 1
+    if not args.quiet:
+      print(
+          '[%d/%d] %-60s' % (done, len(files), relpath(fs.path)[-60:]),
+          end='\r')
+  if not args.quiet:
+    print('')
+
+
+def get_file_status(fpath):
+  sha_file = fpath + SUFFIX
+  sha_exists = os.path.exists(sha_file)
+  file_exists = os.path.exists(fpath)
+  actual_digest = None
+  expected_digest = None
+  if sha_exists:
+    with open(sha_file, 'r') as f:
+      expected_digest = f.readline().strip()
+  if file_exists:
+    actual_digest = hash_file(fpath)
+  if sha_exists and not file_exists:
+    status = FS_MISSING
+  elif not sha_exists and file_exists:
+    status = FS_NEW_FILE
+  elif not sha_exists and not file_exists:
+    raise Exception(fpath)
+  elif expected_digest == actual_digest:
+    status = FS_MATCH
+  else:
+    status = FS_MODIFIED
+  return FileStat(fpath, status, actual_digest, expected_digest)
+
+
+def cmd_upload(dir):
+  all_files = list_files(dir, scan_new_files=True)
+  files_to_upload = []
+  for fs in ThreadPool(args.jobs).imap_unordered(get_file_status, all_files):
+    if fs.status in (FS_NEW_FILE, FS_MODIFIED):
+      files_to_upload.append(fs)
+  if len(files_to_upload) == 0:
+    if not args.quiet:
+      print('No modified or new files require uploading')
+    return 0
+  if args.dry_run:
+    return 0
+  if not args.quiet:
+    print('About to upload %d files:' % len(files_to_upload))
+    print('\n'.join(relpath(f.path) for f in files_to_upload))
+    print('')
+    input('Press a key to continue or CTRL-C to abort')
+
+  def upload_one_file(fs):
+    assert (fs.actual_digest is not None)
+    dst_name = '%s/%s-%s' % (args.bucket, os.path.basename(
+        fs.path), fs.actual_digest)
+    cmd = ['gsutil', '-q', 'cp', '-a', 'public-read', fs.path, dst_name]
+    logging.debug(' '.join(cmd))
+    subprocess.check_call(cmd)
+    with open(fs.path + SUFFIX + '.swp', 'w') as f:
+      f.write(fs.actual_digest)
+    os.rename(fs.path + SUFFIX + '.swp', fs.path + SUFFIX)
+    return fs
+
+  map_concurrently(upload_one_file, files_to_upload)
+  return 0
+
+
+def cmd_download(dir, overwrite_locally_modified=False):
+  files_to_download = []
+  modified = []
+  all_files = list_files(dir, scan_new_files=False)
+  for fs in ThreadPool(args.jobs).imap_unordered(get_file_status, all_files):
+    if fs.status == FS_MISSING:
+      files_to_download.append(fs)
+    elif fs.status == FS_MODIFIED:
+      modified.append(fs)
+
+  if len(modified) > 0 and not overwrite_locally_modified:
+    print('WARNING: The following files diverged locally and will NOT be ' +
+          'overwritten if you continue')
+    print('\n'.join(relpath(f.path) for f in modified))
+    print('')
+    print('Re run `download --overwrite` to overwrite locally modified files')
+    print('or `upload` to sync them on the GCS bucket')
+    print('')
+    input('Press a key to continue or CTRL-C to abort')
+  elif overwrite_locally_modified:
+    files_to_download += modified
+
+  if len(files_to_download) == 0:
+    if not args.quiet:
+      print('Nothing to do, all files are synced')
+    return 0
+
+  if not args.quiet:
+    print('Downloading %d files in //%s' %
+          (len(files_to_download), relpath(args.dir)))
+  if args.dry_run:
+    print('\n'.join(files_to_download))
+    return
+
+  def download_one_file(fs):
+    assert (fs.expected_digest is not None)
+    uri = '%s/%s-%s' % (args.bucket, os.path.basename(
+        fs.path), fs.expected_digest)
+    uri = uri.replace('gs://', 'https://storage.googleapis.com/')
+    logging.debug(uri)
+    tmp_path = fs.path + '.swp'
+    download(uri, tmp_path)
+    digest = hash_file(tmp_path)
+    if digest != fs.expected_digest:
+      raise Exception('Mismatching digest for %s. expected=%s, actual=%s' %
+                      (uri, fs.expected_digest, digest))
+    os.rename(tmp_path, fs.path)
+    return fs
+
+  map_concurrently(download_one_file, files_to_download)
+  return 0
+
+
+def cmd_status(dir):
+  files = list_files(dir, scan_new_files=True)
+  file_by_status = defaultdict(list)
+  num_files = 0
+  num_out_of_sync = 0
+  for fs in ThreadPool(args.jobs).imap_unordered(get_file_status, files):
+    file_by_status[fs.status].append(relpath(fs.path))
+    num_files += 1
+  for status, rpaths in sorted(file_by_status.items()):
+    if status != FS_MATCH:
+      for rpath in rpaths:
+        num_out_of_sync += 1
+        if not args.quiet:
+          print('%-15s: %s' % (status, rpath))
+  if num_out_of_sync == 0:
+    if not args.quiet:
+      print('Scanned %d files in //%s, everything in sync.' %
+            (num_files, relpath(dir)))
+    return 0
+  return 1
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--dir', default=os.path.join(ROOT_DIR, 'test/data'))
+  parser.add_argument('--overwrite', action='store_true')
+  parser.add_argument('--bucket', default=BUCKET)
+  parser.add_argument('--jobs', '-j', default=10, type=int)
+  parser.add_argument('--dry-run', '-n', action='store_true')
+  parser.add_argument('--quiet', '-q', action='store_true')
+  parser.add_argument('--verbose', '-v', action='store_true')
+  parser.add_argument('cmd', choices=['status', 'download', 'upload'])
+  global args
+  args = parser.parse_args()
+  logging.basicConfig(
+      format='%(asctime)s %(levelname).1s %(message)s',
+      level=logging.DEBUG if args.verbose else logging.INFO,
+      datefmt=r'%H:%M:%S')
+  if args.cmd == 'status':
+    return cmd_status(args.dir)
+  if args.cmd == 'download':
+    return cmd_download(args.dir, overwrite_locally_modified=args.overwrite)
+  if args.cmd == 'upload':
+    return cmd_upload(args.dir)
+  print('Unknown command: %s' % args.cmd)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/ui/src/common/actions.ts b/ui/src/common/actions.ts
index 26bc554..0bdc3df 100644
--- a/ui/src/common/actions.ts
+++ b/ui/src/common/actions.ts
@@ -177,11 +177,28 @@
     state.traceUuid = args.traceUuid;
   },
 
+  fillUiTrackIdByTraceTrackId(
+      state: StateDraft, trackState: TrackState, uiTrackId: string) {
+    const config = trackState.config as {trackId: number};
+    if (config.trackId !== undefined) {
+      state.uiTrackIdByTraceTrackId.set(config.trackId, uiTrackId);
+      return;
+    }
+
+    const multiple = trackState.config as {trackIds: number[]};
+    if (multiple.trackIds !== undefined) {
+      for (const trackId of multiple.trackIds) {
+        state.uiTrackIdByTraceTrackId.set(trackId, uiTrackId);
+      }
+    }
+  },
+
   addTracks(state: StateDraft, args: {tracks: AddTrackArgs[]}) {
     args.tracks.forEach(track => {
       const id = track.id === undefined ? `${state.nextId++}` : track.id;
       track.id = id;
       state.tracks[id] = track as TrackState;
+      this.fillUiTrackIdByTraceTrackId(state, track as TrackState, id);
       if (track.trackGroup === SCROLLING_TRACK_GROUP) {
         state.scrollingTracks.push(id);
       } else if (track.trackGroup !== undefined) {
@@ -204,6 +221,7 @@
       trackGroup: args.trackGroup,
       config: args.config,
     };
+    this.fillUiTrackIdByTraceTrackId(state, state.tracks[id], id);
     if (args.trackGroup === SCROLLING_TRACK_GROUP) {
       state.scrollingTracks.push(id);
     } else if (args.trackGroup !== undefined) {
@@ -878,6 +896,12 @@
     state.currentTab = args.tab;
   },
 
+  toggleAllTrackGroups(state: StateDraft, args: {collapsed: boolean}) {
+    for (const [_, group] of Object.entries(state.trackGroups)) {
+      group.collapsed = args.collapsed;
+    }
+  },
+
   addNewPivotTable(state: StateDraft, args: {
     name: string,
     pivotTableId: string,
diff --git a/ui/src/common/immer_init.ts b/ui/src/common/immer_init.ts
index b0ecf01..9b8817c 100644
--- a/ui/src/common/immer_init.ts
+++ b/ui/src/common/immer_init.ts
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import {enablePatches, setAutoFreeze} from 'immer';
+import {enableMapSet, enablePatches, setAutoFreeze} from 'immer';
 
 export function initializeImmerJs() {
   enablePatches();
@@ -24,4 +24,6 @@
   // By doing so, we  accidentally the local copy of global state, which is
   // supposed to be immutable.
   setAutoFreeze(false);
+
+  enableMapSet();
 }
diff --git a/ui/src/common/state.ts b/ui/src/common/state.ts
index 543c635..3b9cf21 100644
--- a/ui/src/common/state.ts
+++ b/ui/src/common/state.ts
@@ -74,8 +74,10 @@
 // 9: Add a field to track last loaded recording profile name
 // 10: Change last loaded profile tracking type to accommodate auto-save.
 // 11: Rename updateChromeCategories to fetchChromeCategories.
+// 12: Add a field to cache mapping from UI track ID to trace track ID in order
+//     to speed up flow arrows rendering.
 // "[...]HeapProfileFlamegraph[...]" -> "[...]Flamegraph[...]".
-export const STATE_VERSION = 11;
+export const STATE_VERSION = 12;
 
 export const SCROLLING_TRACK_GROUP = 'ScrollingTracks';
 
@@ -365,6 +367,7 @@
   traceUuid?: string;
   trackGroups: ObjectById<TrackGroupState>;
   tracks: ObjectById<TrackState>;
+  uiTrackIdByTraceTrackId: Map<number, string>;
   areas: ObjectById<AreaById>;
   aggregatePreferences: ObjectById<AggregationState>;
   visibleTracks: string[];
@@ -831,6 +834,7 @@
     engines: {},
     traceTime: {...defaultTraceTime},
     tracks: {},
+    uiTrackIdByTraceTrackId: new Map<number, string>(),
     aggregatePreferences: {},
     trackGroups: {},
     visibleTracks: [],
diff --git a/ui/src/controller/flow_events_controller.ts b/ui/src/controller/flow_events_controller.ts
index a5a80cd..fe703b7 100644
--- a/ui/src/controller/flow_events_controller.ts
+++ b/ui/src/controller/flow_events_controller.ts
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 import {Engine} from '../common/engine';
+import {featureFlags} from '../common/feature_flags';
 import {NUM, STR_NULL} from '../common/query_result';
 import {Area} from '../common/state';
 import {fromNs, toNs} from '../common/time';
@@ -34,6 +35,15 @@
   engine: Engine;
 }
 
+const SHOW_INDIRECT_PRECEDING_FLOWS_FLAG = featureFlags.register({
+  id: 'showIndirectPrecedingFlows',
+  name: 'Show indirect preceding flows',
+  description: 'Show indirect preceding flows (connected through ancestor ' +
+      'slices) when a slice is selected.',
+  defaultValue: false,
+});
+
+
 export class FlowEventsController extends Controller<'main'> {
   private lastSelectedSliceId?: number;
   private lastSelectedArea?: Area;
@@ -128,6 +138,14 @@
     this.lastSelectedSliceId = sliceId;
     this.lastSelectedKind = 'CHROME_SLICE';
 
+    const connectedFlows = SHOW_INDIRECT_PRECEDING_FLOWS_FLAG.get() ?
+        `(
+           select * from directly_connected_flow(${sliceId})
+           union
+           select * from preceding_flow(${sliceId})
+         )` :
+        `directly_connected_flow(${sliceId})`;
+
     const query = `
     select
       f.slice_out as beginSliceId,
@@ -147,7 +165,7 @@
       extract_arg(f.arg_set_id, 'cat') as category,
       extract_arg(f.arg_set_id, 'name') as name,
       f.id as id
-    from directly_connected_flow(${sliceId}) f
+    from ${connectedFlows} f
     join slice t1 on f.slice_out = t1.slice_id
     join slice t2 on f.slice_in = t2.slice_id
     `;
diff --git a/ui/src/frontend/flow_events_panel.ts b/ui/src/frontend/flow_events_panel.ts
index 6ed7656..b8fa46f 100644
--- a/ui/src/frontend/flow_events_panel.ts
+++ b/ui/src/frontend/flow_events_panel.ts
@@ -19,7 +19,6 @@
 import {Flow, globals} from './globals';
 import {BLANK_CHECKBOX, CHECKBOX} from './icons';
 import {Panel, PanelSize} from './panel';
-import {findUiTrackId} from './scroll_helper';
 
 export const ALL_CATEGORIES = '_all_';
 
@@ -44,7 +43,7 @@
     }
 
     const flowClickHandler = (sliceId: number, trackId: number) => {
-      const uiTrackId = findUiTrackId(trackId);
+      const uiTrackId = globals.state.uiTrackIdByTraceTrackId.get(trackId);
       if (uiTrackId) {
         globals.makeSelection(
             Actions.selectChromeSlice(
diff --git a/ui/src/frontend/flow_events_renderer.ts b/ui/src/frontend/flow_events_renderer.ts
index c435ef8..3a989cd 100644
--- a/ui/src/frontend/flow_events_renderer.ts
+++ b/ui/src/frontend/flow_events_renderer.ts
@@ -16,7 +16,6 @@
 import {ALL_CATEGORIES, getFlowCategories} from './flow_events_panel';
 import {Flow, FlowPoint, globals} from './globals';
 import {PanelVNode} from './panel';
-import {findUiTrackId} from './scroll_helper';
 import {SliceRect} from './track';
 import {TrackGroupPanel} from './track_group_panel';
 import {TrackPanel} from './track_panel';
@@ -98,7 +97,7 @@
 
 export class FlowEventsRenderer {
   private getTrackGroupIdByTrackId(trackId: number): string|undefined {
-    const uiTrackId = findUiTrackId(trackId);
+    const uiTrackId = globals.state.uiTrackIdByTraceTrackId.get(trackId);
     return uiTrackId ? globals.state.tracks[uiTrackId].trackGroup : undefined;
   }
 
diff --git a/ui/src/frontend/keyboard_event_handler.ts b/ui/src/frontend/keyboard_event_handler.ts
index 17a60dc..4f21346 100644
--- a/ui/src/frontend/keyboard_event_handler.ts
+++ b/ui/src/frontend/keyboard_event_handler.ts
@@ -20,7 +20,6 @@
 import {Flow, globals} from './globals';
 import {toggleHelp} from './help_modal';
 import {
-  findUiTrackId,
   horizontalScrollAndZoomToRange,
   verticalScrollToTrack
 } from './scroll_helper';
@@ -163,7 +162,8 @@
   for (const flow of globals.connectedFlows) {
     if (flow.id === flowId) {
       const flowPoint = (direction === 'Backward' ? flow.begin : flow.end);
-      const uiTrackId = findUiTrackId(flowPoint.trackId);
+      const uiTrackId =
+          globals.state.uiTrackIdByTraceTrackId.get(flowPoint.trackId);
       if (uiTrackId) {
         globals.makeSelection(Actions.selectChromeSlice(
             {id: flowPoint.sliceId, trackId: uiTrackId, table: 'slice'}));
diff --git a/ui/src/frontend/notes_panel.ts b/ui/src/frontend/notes_panel.ts
index 2ce959a..44489cb 100644
--- a/ui/src/frontend/notes_panel.ts
+++ b/ui/src/frontend/notes_panel.ts
@@ -24,6 +24,7 @@
 import {globals} from './globals';
 import {gridlines} from './gridline_helper';
 import {Panel, PanelSize} from './panel';
+import {isTraceLoaded} from './sidebar';
 
 const FLAG_WIDTH = 16;
 const AREA_TRIANGLE_WIDTH = 10;
@@ -61,12 +62,30 @@
   }
 
   view() {
-    return m('.notes-panel', {
-      onclick: (e: PerfettoMouseEvent) => {
-        this.onClick(e.layerX - TRACK_SHELL_WIDTH, e.layerY);
-        e.stopPropagation();
-      },
-    });
+    const allCollapsed = Object.values(globals.state.trackGroups)
+                             .every((group) => group.collapsed);
+
+    return m(
+        '.notes-panel',
+        {
+          onclick: (e: PerfettoMouseEvent) => {
+            this.onClick(e.layerX - TRACK_SHELL_WIDTH, e.layerY);
+            e.stopPropagation();
+          },
+        },
+        isTraceLoaded() ?
+            m('button',
+              {
+                onclick: (e: Event) => {
+                  e.preventDefault();
+                  globals.dispatch(
+                      Actions.toggleAllTrackGroups({collapsed: !allCollapsed}));
+                }
+              },
+              m('i.material-icons',
+                {title: allCollapsed ? 'Expand all' : 'Collapse all'},
+                allCollapsed ? 'unfold_more' : 'unfold_less')) :
+            '');
   }
 
   renderCanvas(ctx: CanvasRenderingContext2D, size: PanelSize) {
diff --git a/ui/src/frontend/query_table.ts b/ui/src/frontend/query_table.ts
index aaf4742..8e94fd5 100644
--- a/ui/src/frontend/query_table.ts
+++ b/ui/src/frontend/query_table.ts
@@ -25,7 +25,6 @@
 import {Panel} from './panel';
 import {Router} from './router';
 import {
-  findUiTrackId,
   horizontalScrollAndZoomToRange,
   verticalScrollToTrack
 } from './scroll_helper';
@@ -57,8 +56,8 @@
     const sliceDur = fromNs(Math.max(row.dur as number, 1));
     const sliceEnd = sliceStart + sliceDur;
     const trackId = row.track_id as number;
-    const uiTrackId = findUiTrackId(trackId);
-    if (uiTrackId === null) return;
+    const uiTrackId = globals.state.uiTrackIdByTraceTrackId.get(trackId);
+    if (uiTrackId === undefined) return;
     verticalScrollToTrack(uiTrackId, true);
     horizontalScrollAndZoomToRange(sliceStart, sliceEnd);
     let sliceId: number|undefined;
diff --git a/ui/src/frontend/scroll_helper.ts b/ui/src/frontend/scroll_helper.ts
index be24399..3db08a7 100644
--- a/ui/src/frontend/scroll_helper.ts
+++ b/ui/src/frontend/scroll_helper.ts
@@ -107,21 +107,3 @@
   }
   horizontalScrollToTs(ts);
 }
-
-/**
- * Returns the UI track Id that is associated with the given |traceTrackId| in
- * the trace_processor. Due to concepts like Async tracks and TrackGroups this
- * is not always a one to one mapping.
- */
-export function findUiTrackId(traceTrackId: number) {
-  for (const [uiTrackId, trackState] of Object.entries(globals.state.tracks)) {
-    const config = trackState.config as {trackId: number};
-    if (config.trackId === traceTrackId) return uiTrackId;
-    const multiple = trackState.config as {trackIds: number[]};
-    if (multiple.trackIds !== undefined &&
-        multiple.trackIds.includes(traceTrackId)) {
-      return uiTrackId;
-    }
-  }
-  return null;
-}
diff --git a/ui/src/frontend/sidebar.ts b/ui/src/frontend/sidebar.ts
index 8b46c4f..0ada41c 100644
--- a/ui/src/frontend/sidebar.ts
+++ b/ui/src/frontend/sidebar.ts
@@ -411,7 +411,7 @@
       });
 }
 
-function isTraceLoaded(): boolean {
+export function isTraceLoaded(): boolean {
   const engine = Object.values(globals.state.engines)[0];
   return engine !== undefined;
 }