Merge "Revert "Annotate trace packets with trusted PID in traced""
diff --git a/Android.bp b/Android.bp
index 290dfd8..333d1b1 100644
--- a/Android.bp
+++ b/Android.bp
@@ -3692,6 +3692,7 @@
         "protos/perfetto/metrics/android/powrails_metric.proto",
         "protos/perfetto/metrics/android/process_metadata.proto",
         "protos/perfetto/metrics/android/profiler_smaps.proto",
+        "protos/perfetto/metrics/android/rt_runtime_metric.proto",
         "protos/perfetto/metrics/android/simpleperf.proto",
         "protos/perfetto/metrics/android/startup_metric.proto",
         "protos/perfetto/metrics/android/surfaceflinger.proto",
@@ -3751,6 +3752,7 @@
         "protos/perfetto/metrics/android/powrails_metric.proto",
         "protos/perfetto/metrics/android/process_metadata.proto",
         "protos/perfetto/metrics/android/profiler_smaps.proto",
+        "protos/perfetto/metrics/android/rt_runtime_metric.proto",
         "protos/perfetto/metrics/android/simpleperf.proto",
         "protos/perfetto/metrics/android/startup_metric.proto",
         "protos/perfetto/metrics/android/surfaceflinger.proto",
@@ -8094,6 +8096,7 @@
         "src/trace_processor/metrics/sql/android/android_package_list.sql",
         "src/trace_processor/metrics/sql/android/android_powrails.sql",
         "src/trace_processor/metrics/sql/android/android_proxy_power.sql",
+        "src/trace_processor/metrics/sql/android/android_rt_runtime.sql",
         "src/trace_processor/metrics/sql/android/android_simpleperf.sql",
         "src/trace_processor/metrics/sql/android/android_startup.sql",
         "src/trace_processor/metrics/sql/android/android_surfaceflinger.sql",
diff --git a/BUILD b/BUILD
index 3e7abd7..21df06b 100644
--- a/BUILD
+++ b/BUILD
@@ -1059,6 +1059,7 @@
         "src/trace_processor/metrics/sql/android/android_package_list.sql",
         "src/trace_processor/metrics/sql/android/android_powrails.sql",
         "src/trace_processor/metrics/sql/android/android_proxy_power.sql",
+        "src/trace_processor/metrics/sql/android/android_rt_runtime.sql",
         "src/trace_processor/metrics/sql/android/android_simpleperf.sql",
         "src/trace_processor/metrics/sql/android/android_startup.sql",
         "src/trace_processor/metrics/sql/android/android_surfaceflinger.sql",
@@ -2627,6 +2628,7 @@
         "protos/perfetto/metrics/android/powrails_metric.proto",
         "protos/perfetto/metrics/android/process_metadata.proto",
         "protos/perfetto/metrics/android/profiler_smaps.proto",
+        "protos/perfetto/metrics/android/rt_runtime_metric.proto",
         "protos/perfetto/metrics/android/simpleperf.proto",
         "protos/perfetto/metrics/android/startup_metric.proto",
         "protos/perfetto/metrics/android/surfaceflinger.proto",
diff --git a/docs/contributing/getting-started.md b/docs/contributing/getting-started.md
index 8cb27ec..9696e7e 100644
--- a/docs/contributing/getting-started.md
+++ b/docs/contributing/getting-started.md
@@ -47,8 +47,6 @@
 ## Community
 
 You can reach us on our [Discord channel](https://discord.gg/35ShE3A).
-If you prefer using IRC we have an experimental Discord <> IRC bridge
-synced with `#perfetto-dev` on [Freenode](https://webchat.freenode.net/).
 
 Mailing list: https://groups.google.com/forum/#!forum/perfetto-dev
 
diff --git a/docs/visualization/perfetto-ui-release-process.md b/docs/visualization/perfetto-ui-release-process.md
index 8f12b1a..ed0af8d 100644
--- a/docs/visualization/perfetto-ui-release-process.md
+++ b/docs/visualization/perfetto-ui-release-process.md
@@ -103,3 +103,6 @@
 [go/perfetto-ui-autopush](http://go/perfetto-ui-autopush) and
 [go/perfetto-ui-channels](http://go/perfetto-ui-channels) for the design docs of
 the serving infrastructure.
+
+## Publishing the Perfetto Chrome extension
+Googlers: see go/perfetto-release-chrome-extension
diff --git a/include/perfetto/base/logging.h b/include/perfetto/base/logging.h
index eaa5710..3599dda 100644
--- a/include/perfetto/base/logging.h
+++ b/include/perfetto/base/logging.h
@@ -147,7 +147,8 @@
                           __LINE__, ##__VA_ARGS__);                           \
   } while (0)
 #elif defined(PERFETTO_DISABLE_LOG)
-#define PERFETTO_XLOG(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+#define PERFETTO_XLOG(level, fmt, ...) ::perfetto::base::ignore_result(level, \
+                                fmt, ##__VA_ARGS__)
 #else
 #define PERFETTO_XLOG(level, fmt, ...)                                      \
   ::perfetto::base::LogMessage(level, ::perfetto::base::Basename(__FILE__), \
diff --git a/infra/discord-irc-bridge/Dockerfile b/infra/discord-irc-bridge/Dockerfile
deleted file mode 100644
index 5591da2..0000000
--- a/infra/discord-irc-bridge/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM node:latest
-
-RUN useradd -m discord_irc
-RUN npm install -g discord-irc
-RUN apt-get -y update && apt-get -y install supervisor
-COPY supervisord.conf /etc/
-COPY discord-irc.json /etc/
-COPY start.py /
-USER discord_irc
-
-CMD ["python", "start.py"]
\ No newline at end of file
diff --git a/infra/discord-irc-bridge/README.md b/infra/discord-irc-bridge/README.md
deleted file mode 100644
index 9c79d2d..0000000
--- a/infra/discord-irc-bridge/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# IRC <> Discord bridge
-
-This directory contains the docker image for the discord<>IRC bot.
-The docker container is built and pushed running:
-
-```bash
-docker build -t gcr.io/perfetto-irc/discord-irc-bridge .
-docker push gcr.io/perfetto-irc/discord-irc-bridge
-```
-
-The docker container requires two environment variables to be set (see below).
-These are set at the GCE project level (project: perfetto-irc).
-There is a VM template in the GCE project which has the right env vars set.
-If a VM restart is required use the template, don't create the VM from scratch.
-
-NICKNAME: This must be set to perfetto_discord:password. The password can be
-  obtained on the usual internal website for passwords. Look for the account
-  "perfetto_discord@freenode".
-
-DISCORD_TOKEN: This must be set to the Discord token for the bot. Look for
-  the account "perfetto-discord-bot-token" in the internal password website.
diff --git a/infra/discord-irc-bridge/discord-irc.json b/infra/discord-irc-bridge/discord-irc.json
deleted file mode 100644
index bf71f2a..0000000
--- a/infra/discord-irc-bridge/discord-irc.json
+++ /dev/null
@@ -1,23 +0,0 @@
-[
-  {
-    "server": "irc.freenode.org",
-    "autoSendCommands": [
-    ],
-    "channelMapping": {
-      "629013441096450058": "#perfetto-dev"
-    },
-    "ircOptions": {
-      "port": "6697",
-      "secure": true
-    },
-    "ircNickColor": false,
-    "ircPreventMention": true,
-    "commandCharacters": [],
-    "ignoreUsers": {
-      "irc": [],
-      "discord": []
-    },
-    "webhooks": {
-    }
-  }
-]
diff --git a/infra/discord-irc-bridge/start.py b/infra/discord-irc-bridge/start.py
deleted file mode 100644
index 35bbb93..0000000
--- a/infra/discord-irc-bridge/start.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import os
-import sys
-
-with open("/etc/discord-irc.json") as f:
-  cfg = json.load(f)
-
-cfg[0]["nickname"] = os.getenv("NICKNAME")
-cfg[0]["discordToken"] = os.getenv("DISCORD_TOKEN")
-
-if cfg[0]["nickname"] is None:
-  sys.stderr.write("NICKNAME env var not set\n")
-  sys.exit(1)
-
-if cfg[0]["discordToken"] is None:
-  sys.stderr.write("DISCORD_TOKEN env var not set\n")
-  sys.exit(1)
-
-with open("/tmp/discord-irc-merged.json", "w") as f:
-  json.dump(cfg, f)
-
-os.execl("/usr/bin/supervisord", "supervisord")
diff --git a/infra/discord-irc-bridge/supervisord.conf b/infra/discord-irc-bridge/supervisord.conf
deleted file mode 100644
index 713b332..0000000
--- a/infra/discord-irc-bridge/supervisord.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-[supervisord]
-nodaemon=true
-logfile=/dev/stdout
-logfile_maxbytes=0
-
-[program:discord_irc]
-command=discord-irc --config /tmp/discord-irc-merged.json
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stdout_logfile_backups=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-stderr_logfile_backups=0
diff --git a/protos/perfetto/common/builtin_clock.proto b/protos/perfetto/common/builtin_clock.proto
index 153c5b2..dba9ec9 100644
--- a/protos/perfetto/common/builtin_clock.proto
+++ b/protos/perfetto/common/builtin_clock.proto
@@ -29,4 +29,8 @@
   BUILTIN_CLOCK_MAX_ID = 63;
 
   reserved 7, 8;
+
+  // An internal CL (ag/16521245) has taken this for BUILTIN_CLOCK_TSC.
+  // That might get upstreamed later on. Avoid diverging on this ID in future.
+  reserved 9;
 }
diff --git a/protos/perfetto/config/perfetto_config.proto b/protos/perfetto/config/perfetto_config.proto
index 407597a..69078d4 100644
--- a/protos/perfetto/config/perfetto_config.proto
+++ b/protos/perfetto/config/perfetto_config.proto
@@ -258,6 +258,10 @@
   BUILTIN_CLOCK_MAX_ID = 63;
 
   reserved 7, 8;
+
+  // An internal CL (ag/16521245) has taken this for BUILTIN_CLOCK_TSC.
+  // That might get upstreamed later on. Avoid diverging on this ID in future.
+  reserved 9;
 }
 
 // End of protos/perfetto/common/builtin_clock.proto
diff --git a/protos/perfetto/metrics/android/BUILD.gn b/protos/perfetto/metrics/android/BUILD.gn
index f09aa37..3e0b2e7 100644
--- a/protos/perfetto/metrics/android/BUILD.gn
+++ b/protos/perfetto/metrics/android/BUILD.gn
@@ -46,6 +46,7 @@
     "powrails_metric.proto",
     "process_metadata.proto",
     "profiler_smaps.proto",
+    "rt_runtime_metric.proto",
     "simpleperf.proto",
     "startup_metric.proto",
     "surfaceflinger.proto",
diff --git a/protos/perfetto/metrics/android/rt_runtime_metric.proto b/protos/perfetto/metrics/android/rt_runtime_metric.proto
new file mode 100644
index 0000000..edb66c5
--- /dev/null
+++ b/protos/perfetto/metrics/android/rt_runtime_metric.proto
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package perfetto.protos;
+
+// Measures the max runtime of RT tasks and counts RT slices running over 5ms.
+message AndroidRtRuntimeMetric {
+  message RtSlice {
+    // thread name
+    optional string tname = 1;
+    // timestamp
+    optional int64 ts = 2;
+    // runtime of RT task
+    optional int64 dur = 3;
+  }
+
+  // max runtime of RT tasks
+  optional int64 max_runtime = 1;
+  // how many RT tasks are over 5ms.
+  optional int64 over_5ms_count = 2;
+  // information for top 10 RT tasks
+  repeated RtSlice longest_rt_slices = 3;
+}
+
diff --git a/protos/perfetto/metrics/metrics.proto b/protos/perfetto/metrics/metrics.proto
index d8a29a1..8b1a173 100644
--- a/protos/perfetto/metrics/metrics.proto
+++ b/protos/perfetto/metrics/metrics.proto
@@ -43,6 +43,7 @@
 import "protos/perfetto/metrics/android/package_list.proto";
 import "protos/perfetto/metrics/android/powrails_metric.proto";
 import "protos/perfetto/metrics/android/profiler_smaps.proto";
+import "protos/perfetto/metrics/android/rt_runtime_metric.proto";
 import "protos/perfetto/metrics/android/simpleperf.proto";
 import "protos/perfetto/metrics/android/startup_metric.proto";
 import "protos/perfetto/metrics/android/surfaceflinger.proto";
@@ -96,7 +97,7 @@
 
 // Root message for all Perfetto-based metrics.
 //
-// Next id: 42
+// Next id: 43
 message TraceMetrics {
   reserved 4, 10, 13, 14, 16, 19;
 
@@ -206,6 +207,8 @@
   // is clear that this data is necessary.
   optional AndroidCameraUnaggregatedMetric android_camera_unagg = 41;
 
+  optional AndroidRtRuntimeMetric android_rt_runtime = 42;
+
   // Demo extensions.
   extensions 450 to 499;
 
diff --git a/protos/perfetto/metrics/perfetto_merged_metrics.proto b/protos/perfetto/metrics/perfetto_merged_metrics.proto
index 100c76c..7472ce3 100644
--- a/protos/perfetto/metrics/perfetto_merged_metrics.proto
+++ b/protos/perfetto/metrics/perfetto_merged_metrics.proto
@@ -980,6 +980,30 @@
 
 // End of protos/perfetto/metrics/android/profiler_smaps.proto
 
+// Begin of protos/perfetto/metrics/android/rt_runtime_metric.proto
+
+// Measures the max runtime of RT tasks and counts RT slices running over 5ms.
+message AndroidRtRuntimeMetric {
+  message RtSlice {
+    // thread name
+    optional string tname = 1;
+    // timestamp
+    optional int64 ts = 2;
+    // runtime of RT task
+    optional int64 dur = 3;
+  }
+
+  // max runtime of RT tasks
+  optional int64 max_runtime = 1;
+  // how many RT tasks are over 5ms.
+  optional int64 over_5ms_count = 2;
+  // information for top 10 RT tasks
+  repeated RtSlice longest_rt_slices = 3;
+}
+
+
+// End of protos/perfetto/metrics/android/rt_runtime_metric.proto
+
 // Begin of protos/perfetto/metrics/android/simpleperf.proto
 
 // Metric that stores information related to atrace events generated by
@@ -1396,7 +1420,7 @@
 
 // Root message for all Perfetto-based metrics.
 //
-// Next id: 42
+// Next id: 43
 message TraceMetrics {
   reserved 4, 10, 13, 14, 16, 19;
 
@@ -1506,6 +1530,8 @@
   // is clear that this data is necessary.
   optional AndroidCameraUnaggregatedMetric android_camera_unagg = 41;
 
+  optional AndroidRtRuntimeMetric android_rt_runtime = 42;
+
   // Demo extensions.
   extensions 450 to 499;
 
diff --git a/protos/perfetto/trace/perfetto_trace.proto b/protos/perfetto/trace/perfetto_trace.proto
index e9afd30..b14a0f4 100644
--- a/protos/perfetto/trace/perfetto_trace.proto
+++ b/protos/perfetto/trace/perfetto_trace.proto
@@ -258,6 +258,10 @@
   BUILTIN_CLOCK_MAX_ID = 63;
 
   reserved 7, 8;
+
+  // An internal CL (ag/16521245) has taken this for BUILTIN_CLOCK_TSC.
+  // That might get upstreamed later on. Avoid diverging on this ID in future.
+  reserved 9;
 }
 
 // End of protos/perfetto/common/builtin_clock.proto
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.cc b/src/trace_processor/importers/proto/heap_graph_tracker.cc
index d1c298a..182265e 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.cc
@@ -296,7 +296,9 @@
       cleaner_thunk_this0_str_id_(context_->storage->InternString(
           "libcore.util.NativeAllocationRegistry$CleanerThunk.this$0")),
       native_size_str_id_(context_->storage->InternString(
-          "libcore.util.NativeAllocationRegistry.size")) {}
+          "libcore.util.NativeAllocationRegistry.size")),
+      cleaner_next_str_id_(
+          context_->storage->InternString("sun.misc.Cleaner.next")) {}
 
 HeapGraphTracker::SequenceState& HeapGraphTracker::GetOrCreateSequence(
     uint32_t seq_id) {
@@ -760,22 +762,26 @@
     auto cleaner_objs = objects_tbl.FilterToRowMap(
         {objects_tbl.type_id().eq(class_id.value),
          objects_tbl.upid().eq(seq.current_upid),
-         objects_tbl.graph_sample_ts().eq(seq.current_ts),
-         // If a Cleaner is not reachable, its associated native memory must
-         // have been already freed. Skip it.
-         objects_tbl.reachable().ne_value(SqlValue::Long(0)),
-        });
+         objects_tbl.graph_sample_ts().eq(seq.current_ts)});
     for (auto obj_it = cleaner_objs.IterateRows(); obj_it; obj_it.Next()) {
+      tables::HeapGraphObjectTable::Id cleaner_obj_id =
+          objects_tbl.id()[obj_it.row()];
       base::Optional<tables::HeapGraphObjectTable::Id> referent_id =
-          GetReferenceByFieldName(objects_tbl.id()[obj_it.row()],
-                                  referent_str_id_);
+          GetReferenceByFieldName(cleaner_obj_id, referent_str_id_);
       base::Optional<tables::HeapGraphObjectTable::Id> thunk_id =
-          GetReferenceByFieldName(objects_tbl.id()[obj_it.row()],
-                                  cleaner_thunk_str_id_);
+          GetReferenceByFieldName(cleaner_obj_id, cleaner_thunk_str_id_);
 
       if (!referent_id || !thunk_id) {
         continue;
       }
+
+      base::Optional<tables::HeapGraphObjectTable::Id> next_id =
+          GetReferenceByFieldName(cleaner_obj_id, cleaner_next_str_id_);
+      if (next_id.has_value() && *next_id == cleaner_obj_id) {
+        // sun.misc.Cleaner.next points to the sun.misc.Cleaner: this means
+        // that the sun.misc.Cleaner.clean() has already been called. Skip this.
+        continue;
+      }
       cleaners.push_back(Cleaner{*referent_id, *thunk_id});
     }
   }
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.h b/src/trace_processor/importers/proto/heap_graph_tracker.h
index f355175..7f804ed 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.h
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.h
@@ -236,6 +236,7 @@
   StringPool::Id referent_str_id_;
   StringPool::Id cleaner_thunk_this0_str_id_;
   StringPool::Id native_size_str_id_;
+  StringPool::Id cleaner_next_str_id_;
 };
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
index 9eb39f5..6e76214 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
@@ -62,7 +62,7 @@
   tracker.AddInternedLocationName(kSeqId, kLocation,
                                   context.storage->InternString("location"));
 
-  enum Fields : uint64_t { kReferent = 1, kThunk, kThis0 };
+  enum Fields : uint64_t { kReferent = 1, kThunk, kThis0, kNext };
 
   tracker.AddInternedFieldName(kSeqId, kReferent,
                                "java.lang.ref.Reference.referent");
@@ -70,6 +70,7 @@
   tracker.AddInternedFieldName(
       kSeqId, kThis0,
       "libcore.util.NativeAllocationRegistry$CleanerThunk.this$0");
+  tracker.AddInternedFieldName(kSeqId, kNext, "sun.misc.Cleaner.next");
 
   enum Types : uint64_t {
     kTypeBitmap = 1,
@@ -89,7 +90,7 @@
   tracker.AddInternedType(
       kSeqId, kTypeCleaner, context.storage->InternString("sun.misc.Cleaner"),
       kLocation, /*object_size=*/0,
-      /*reference_field_name_ids=*/{kReferent, kThunk}, /*superclass_id=*/0,
+      /*reference_field_name_ids=*/{kReferent, kThunk, kNext}, /*superclass_id=*/0,
       /*classloader_id=*/0, /*no_reference_fields=*/false,
       /*kind=*/normal_kind);
 
@@ -129,7 +130,7 @@
     HeapGraphTracker::SourceObject obj;
     obj.object_id = kObjCleaner;
     obj.type_id = kTypeCleaner;
-    obj.referred_objects = {kObjBitmap, kObjThunk};
+    obj.referred_objects = {kObjBitmap, kObjThunk, 0};
 
     tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
   }
@@ -155,13 +156,6 @@
     tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
   }
 
-  {
-    HeapGraphTracker::SourceRoot root;
-    root.root_type = context.storage->InternString("ROOT");
-    root.object_ids.emplace_back(kObjCleaner);
-    tracker.AddRoot(kSeqId, kPid, kTimestamp, std::move(root));
-  }
-
   tracker.FinalizeProfile(kSeqId);
 
   const auto& objs_table = context.storage->heap_graph_object_table();
diff --git a/src/trace_processor/metrics/sql/BUILD.gn b/src/trace_processor/metrics/sql/BUILD.gn
index 9432bf5..6d9f8f3 100644
--- a/src/trace_processor/metrics/sql/BUILD.gn
+++ b/src/trace_processor/metrics/sql/BUILD.gn
@@ -62,6 +62,7 @@
   "android/process_metadata.sql",
   "android/process_oom_score.sql",
   "android/profiler_smaps.sql",
+  "android/android_rt_runtime.sql",
   "android/mem_stats_priority_breakdown.sql",
   "android/android_multiuser.sql",
   "android/android_multiuser_populator.sql",
diff --git a/src/trace_processor/metrics/sql/android/android_rt_runtime.sql b/src/trace_processor/metrics/sql/android/android_rt_runtime.sql
new file mode 100644
index 0000000..01d8a64
--- /dev/null
+++ b/src/trace_processor/metrics/sql/android/android_rt_runtime.sql
@@ -0,0 +1,46 @@
+--
+-- Copyright 2022 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+DROP VIEW IF EXISTS rt_runtime_all;
+
+CREATE VIEW rt_runtime_all
+AS
+SELECT ts, dur, thread.name AS tname
+FROM sched_slice
+LEFT JOIN thread
+  USING (utid)
+LEFT JOIN process
+  USING (upid)
+WHERE priority < 100
+ORDER BY dur DESC;
+
+DROP VIEW IF EXISTS android_rt_runtime_output;
+
+CREATE VIEW android_rt_runtime_output
+AS
+SELECT
+  AndroidRtRuntimeMetric(
+    'max_runtime',
+    (SELECT dur FROM rt_runtime_all LIMIT 1),
+    'over_5ms_count',
+    (SELECT COUNT(*) FROM rt_runtime_all WHERE dur > 5e6),
+    'longest_rt_slices',
+    (
+      SELECT
+        RepeatedField(
+          AndroidRtRuntimeMetric_RtSlice(
+            'tname', tname, 'ts', ts, 'dur', dur))
+      FROM (SELECT ts, dur, tname FROM rt_runtime_all LIMIT 10)
+    ));
diff --git a/src/trace_processor/metrics/sql/chrome/scroll_jank_cause_queuing_delay.sql b/src/trace_processor/metrics/sql/chrome/scroll_jank_cause_queuing_delay.sql
index d5424bd..d3762ef 100644
--- a/src/trace_processor/metrics/sql/chrome/scroll_jank_cause_queuing_delay.sql
+++ b/src/trace_processor/metrics/sql/chrome/scroll_jank_cause_queuing_delay.sql
@@ -30,15 +30,45 @@
     slice.name != "ThreadController active" AND
     (slice.depth = 0 OR ancestor.name = "ThreadController active");
 
+-- Sort track ids to optimize joining with slices
+-- as engine doesn't do the sort to join in O(LogN)
+-- per row by default
+DROP VIEW IF EXISTS chrome_annotated_threads_and_processes;
+CREATE VIEW chrome_annotated_threads_and_processes AS
+  SELECT
+    thread_track.id AS track_id,
+    chrome_thread.canonical_name AS thread_name,
+    chrome_process.process_type AS process_name
+  FROM
+    thread_track JOIN
+    chrome_thread JOIN
+    chrome_process ON
+    thread_track.utid = chrome_thread.utid AND
+    chrome_thread.upid = chrome_process.upid
+  ORDER BY
+    track_id ASC;
+
+-- See b/166441398 & crbug/1094361 for why we remove threadpool (originally
+-- the -to-End step). In essence -to-End is often reported on the ThreadPool
+-- after the fact with explicit timestamps so it being blocked isn't noteworthy.
+DROP VIEW IF EXISTS blocking_chrome_tasks_without_threadpool;
+CREATE VIEW blocking_chrome_tasks_without_threadpool AS
+  SELECT
+     slice.*,
+     annotations.thread_name AS thread_name,
+     annotations.process_name AS process_name
+  FROM
+    blocking_tasks_no_threadcontroller_active AS slice JOIN
+    chrome_annotated_threads_and_processes AS annotations ON
+    annotations.track_id = slice.track_id
+  WHERE
+    NOT (thread_name GLOB "*ThreadPool*");
+
 -- This view grabs any slice that could have prevented any GestureScrollUpdate
 -- flow event from being run (queuing delays). For RunTask we know that its
 -- generic (and thus hard to figure out whats the cause) so we grab the src
 -- location to make it more meaningful.
 --
--- See b/166441398 & crbug/1094361 for why we remove the -to-End step. In
--- essence -to-End is often reported on the ThreadPool after the fact with
--- explicit timestamps so it being blocked isn't noteworthy.
---
 -- See b/184134310 for why we allow depth == 1 and ancestor.id is null (which
 -- implies its a "ThreadController active" slice because we removed it
 -- previously).
@@ -72,14 +102,10 @@
     slice.*
   FROM
     scroll_flow_event_queuing_delay queuing JOIN
-    blocking_tasks_no_threadcontroller_active AS slice ON
+    blocking_chrome_tasks_without_threadpool AS slice ON
         slice.ts + slice.dur > queuing.ancestor_end AND
         queuing.maybe_next_ancestor_ts > slice.ts AND
-        slice.track_id = queuing.next_track_id AND
-        queuing.description NOT GLOB
-            "InputLatency.LatencyInfo.*ank.STEP_DRAW_AND_SWAP-to-End" AND
-        queuing.description NOT GLOB
-            "InputLatency.LatencyInfo.*ank.STEP_FINISHED_SWAP_BUFFERS-to-End"
+        slice.track_id = queuing.next_track_id
   WHERE
     queuing_time_ns IS NOT NULL AND
     queuing_time_ns > 0;
@@ -208,6 +234,8 @@
     dur_overlapping_ns,
     description,
     replace(file, rtrim(file, replace(file, '/', '')), '') AS file,
+    thread_name,
+    process_name,
     function,
     GROUP_CONCAT(
       CASE WHEN descendant_depth < invalid_depth OR descendant_major_slice THEN
@@ -252,7 +280,7 @@
     , "-") AS descendant_cpu_time
   FROM
     blocking_tasks_queuing_delay_with_invalid_depth
-  GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+  GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
   ORDER BY descendant_cpu_percentage DESC;
 
 -- Create a common name for each "cause" based on the slice stack we found.
@@ -281,8 +309,8 @@
 
 -- Join every row (jank and non-jank with the average non-jank time for the
 -- given metric_name).
-DROP VIEW IF EXISTS scroll_jank_cause_queuing_delay_unannotated;
-CREATE VIEW scroll_jank_cause_queuing_delay_unannotated AS
+DROP VIEW IF EXISTS scroll_jank_cause_queuing_delay;
+CREATE VIEW scroll_jank_cause_queuing_delay AS
   SELECT
     base.*,
     'InputLatency.LatencyInfo.Flow.QueuingDelay.' ||
@@ -293,15 +321,4 @@
   FROM
     scroll_jank_cause_queuing_delay_temp base LEFT JOIN
     scroll_jank_cause_queuing_delay_average_no_jank_time avg_no_jank ON
-        base.location = avg_no_jank.location;
-
--- Annotate with process and thread names.
-DROP VIEW IF EXISTS scroll_jank_cause_queuing_delay;
-CREATE VIEW scroll_jank_cause_queuing_delay AS
-SELECT p.process_type AS process_name, ct.canonical_name AS thread_name, s.*
-FROM scroll_jank_cause_queuing_delay_unannotated s,
-  thread_track tt, chrome_thread ct,
-  chrome_process p
-WHERE s.track_id = tt.id
-  AND tt.utid = ct.utid
-  AND ct.upid = p.upid;
+        base.location = avg_no_jank.location;
diff --git a/src/trace_processor/python/perfetto/trace_processor/api.py b/src/trace_processor/python/perfetto/trace_processor/api.py
index 56d9fc9..c52e628 100644
--- a/src/trace_processor/python/perfetto/trace_processor/api.py
+++ b/src/trace_processor/python/perfetto/trace_processor/api.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 from urllib.parse import urlparse
+from typing import BinaryIO, Generator, List, Optional, Union
 
 from .http import TraceProcessorHttp
 from .loader import get_loader
 from .protos import ProtoFactory
 from .shell import load_shell
 
+# Union of types supported for a trace which can be loaded by shell.
+LoadableTrace = Union[None, str, BinaryIO, Generator[bytes, None, None]]
 
 # Custom exception raised if any trace_processor functions return a
 # response with an error defined
@@ -66,10 +69,23 @@
       # contents into lists based on the type of the batch
       batch_index = 0
       while True:
+        # It's possible on some occasions that there are non UTF-8 characters
+        # in the string_cells field. If this is the case, string_cells is
+        # a bytestring which needs to be decoded (but passing ignore so that
+        # we don't fail in decoding).
+        strings_batch_str = batches[batch_index].string_cells
+        try:
+          strings_batch_str = strings_batch_str.decode('utf-8', 'ignore')
+        except AttributeError:
+          # AttributeError can occur when |strings_batch_str| is an str which
+          # happens when everything in it is UTF-8 (protobuf automatically
+          # does the conversion if it can).
+          pass
+
         # Null-terminated strings in a batch are concatenated
         # into a single large byte array, so we split on the
         # null-terminator to get the individual strings
-        strings_batch = batches[batch_index].string_cells.split('\0')[:-1]
+        strings_batch = strings_batch_str.split('\0')[:-1]
         self.__data_lists[TraceProcessor.QUERY_CELL_STRING_FIELD_ID].extend(
             strings_batch)
         self.__data_lists[TraceProcessor.QUERY_CELL_VARINT_FIELD_ID].extend(
@@ -109,7 +125,6 @@
     # TraceProcesor.
     def as_pandas_dataframe(self):
       try:
-        import numpy as np
         import pandas as pd
 
         # Populate the dataframe with the query results
@@ -131,7 +146,8 @@
           rows.append(row)
 
         df = pd.DataFrame(rows, columns=self.__column_names)
-        return df.where(df.notnull(), None).reset_index(drop=True)
+        return df.astype(object).where(df.notnull(),
+                                       None).reset_index(drop=True)
 
       except ModuleNotFoundError:
         raise TraceProcessorException(
@@ -163,27 +179,67 @@
       return result
 
   def __init__(self,
-               addr=None,
-               file_path=None,
-               bin_path=None,
-               unique_port=True,
-               verbose=False):
-    # Load trace_processor_shell or access via given address
-    if addr:
-      p = urlparse(addr)
-      tp = TraceProcessorHttp(p.netloc if p.netloc else p.path)
-    else:
+               trace: LoadableTrace = None,
+               addr: Optional[str] = None,
+               bin_path: Optional[str] = None,
+               unique_port: bool = True,
+               verbose: bool = False,
+               file_path: Optional[str] = None):
+    """Create a trace processor instance.
+
+    Args:
+      trace: Trace to be loaded into the trace processor instance. One of
+        three types of argument is supported:
+        1) path to a trace file to open and read
+        2) a file like object (file, io.BytesIO or similar) to read
+        3) a generator yielding bytes
+      addr: address of a running trace processor instance. For advanced
+        use only.
+      bin_path: path to a trace processor shell binary. For advanced use
+        only.
+      unique_port: whether the trace processor shell instance should
+        be started on a unique port. Only used when |addr| is not set.
+        For advanced use only.
+      verbose: whether trace processor shell should emit verbose logs;
+        can be very spammy. For advanced use only.
+      file_path (deprecated): path to a trace file to load. Please use
+        |trace| instead of this field: specifying both will cause
+        an exception to be thrown.
+    """
+
+    def create_tp_http():
+      if addr:
+        p = urlparse(addr)
+        return TraceProcessorHttp(p.netloc if p.netloc else p.path)
+
       url, self.subprocess = load_shell(
           bin_path=bin_path, unique_port=unique_port, verbose=verbose)
-      tp = TraceProcessorHttp(url)
-    self.http = tp
+      return TraceProcessorHttp(url)
+
+    if trace and file_path:
+      raise TraceProcessorException(
+          "trace and file_path cannot both be specified.")
+
+    self.http = create_tp_http()
     self.protos = ProtoFactory()
 
-    # Parse trace by its file_path into the loaded instance of trace_processor
     if file_path:
       get_loader().parse_file(self.http, file_path)
+    elif isinstance(trace, str):
+      get_loader().parse_file(self.http, trace)
+    elif hasattr(trace, 'read'):
+      while True:
+        chunk = trace.read(32 * 1024 * 1024)
+        if not chunk:
+          break
+        self.http.parse(chunk)
+      self.http.notify_eof()
+    elif trace:
+      for chunk in trace:
+        self.http.parse(chunk)
+      self.http.notify_eof()
 
-  def query(self, sql):
+  def query(self, sql: str):
     """Executes passed in SQL query using class defined HTTP API, and returns
     the response as a QueryResultIterator. Raises TraceProcessorException if
     the response returns with an error.
@@ -203,7 +259,7 @@
     return TraceProcessor.QueryResultIterator(response.column_names,
                                               response.batch)
 
-  def metric(self, metrics):
+  def metric(self, metrics: List[str]):
     """Returns the metrics data corresponding to the passed in trace metric.
     Raises TraceProcessorException if the response returns with an error.
 
@@ -241,7 +297,8 @@
   def __enter__(self):
     return self
 
-  def __exit__(self, _, __, ___):
+  def __exit__(self, a, b, c):
+    del a, b, c  # Unused.
     self.close()
     return False
 
diff --git a/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor b/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor
index e3a3ea8..053e838 100644
--- a/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor
+++ b/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor
Binary files differ
diff --git a/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor.sha1 b/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor.sha1
index 9658367..2291e71 100644
--- a/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor.sha1
+++ b/src/trace_processor/python/perfetto/trace_processor/metrics.descriptor.sha1
@@ -2,5 +2,5 @@
 // SHA1(tools/gen_binary_descriptors)
 // 9fc6d77de57ec76a80b76aa282f4c7cf5ce55eec
 // SHA1(protos/perfetto/metrics/metrics.proto)
-// 3d9357a253dc649bdd67069d156fc6217f2e6a39
+// 22722c7fde543d5abd1299f48edd49c69c5f5c3e
   
\ No newline at end of file
diff --git a/src/trace_processor/storage/stats.h b/src/trace_processor/storage/stats.h
index b52a4c9..9fb73f8b 100644
--- a/src/trace_processor/storage/stats.h
+++ b/src/trace_processor/storage/stats.h
@@ -212,8 +212,7 @@
   kInfo,      // Diagnostic counters
   kDataLoss,  // Correct operation that still resulted in data loss
   kError      // If any kError counter is > 0 trace_processor_shell will
-              // raise an error. This is *not* surfaced in the web UI.
-              // TODO(b/148587181): Surface these errors in the UI.
+              // raise an error. This is also surfaced in the web UI.
 };
 
 enum Source {
diff --git a/src/trace_processor/trace_processor_shell.cc b/src/trace_processor/trace_processor_shell.cc
index 1eede6f..f601930 100644
--- a/src/trace_processor/trace_processor_shell.cc
+++ b/src/trace_processor/trace_processor_shell.cc
@@ -35,6 +35,7 @@
 #include "perfetto/base/time.h"
 #include "perfetto/ext/base/file_utils.h"
 #include "perfetto/ext/base/getopt.h"
+#include "perfetto/ext/base/optional.h"
 #include "perfetto/ext/base/scoped_file.h"
 #include "perfetto/ext/base/string_splitter.h"
 #include "perfetto/ext/base/string_utils.h"
@@ -1254,7 +1255,9 @@
       ".load-metrics-sql Reloads SQL from extension and custom metric paths\n"
       "                  specified in command line args.\n"
       ".run-metrics      Runs metrics specified in command line args\n"
-      "                  and prints the result.\n");
+      "                  and prints the result.\n"
+      ".width WIDTH      Changes the column width of interactive query\n"
+      "                  output.");
 }
 
 struct InteractiveOptions {
@@ -1268,6 +1271,7 @@
 util::Status StartInteractiveShell(const InteractiveOptions& options) {
   SetupLineEditor();
 
+  uint32_t column_width = options.column_width;
   for (;;) {
     ScopedLine line = GetLine("> ");
     if (!line)
@@ -1294,6 +1298,13 @@
         if (!status.ok()) {
           PERFETTO_ELOG("%s", status.c_message());
         }
+      } else if (strcmp(command, "width") == 0 && strlen(arg)) {
+        base::Optional<uint32_t> width = base::CStringToUInt32(arg);
+        if (!width) {
+          PERFETTO_ELOG("Invalid column width specified");
+          continue;
+        }
+        column_width = *width;
       } else if (strcmp(command, "load-metrics-sql") == 0) {
         base::Status status =
             LoadMetricsAndExtensionsSql(options.metrics, options.extensions);
@@ -1319,7 +1330,7 @@
 
     base::TimeNanos t_start = base::GetWallTimeNs();
     auto it = g_tp->ExecuteQuery(line.get());
-    PrintQueryResultInteractively(&it, t_start, options.column_width);
+    PrintQueryResultInteractively(&it, t_start, column_width);
   }
   return util::OkStatus();
 }
diff --git a/test/trace_processor/chrome/scroll_jank_cause_queuing_delay.out b/test/trace_processor/chrome/scroll_jank_cause_queuing_delay.out
index eedf5a2..a54d9b9 100644
--- a/test/trace_processor/chrome/scroll_jank_cause_queuing_delay.out
+++ b/test/trace_processor/chrome/scroll_jank_cause_queuing_delay.out
@@ -8,6 +8,7 @@
 "Renderer","Compositor",2918,0,7000,"InputLatency.LatencyInfo.Flow.QueuingDelay.NoJank.BlockingTasksUs.ScrollPredictor::ResampleScrollEvents"
 "Renderer","Compositor",2918,0,25000,"InputLatency.LatencyInfo.Flow.QueuingDelay.NoJank.BlockingTasksUs.InputHandlerProxy::HandleGestureScrollUpdate-DeltaUnits"
 "Renderer","Compositor",2918,0,6000,"InputLatency.LatencyInfo.Flow.QueuingDelay.NoJank.BlockingTasksUs.LatencyInfo.Flow"
+"Gpu","VizCompositorThread",2918,0,10000,"InputLatency.LatencyInfo.Flow.QueuingDelay.NoJank.BlockingTasksUs.LatencyInfo.Flow"
 "Browser","CrProcessMain",2926,1,52000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.InputRouterImpl::GestureEventHandled-GestureEventQueue::ProcessGestureAck"
 "Browser","CrProcessMain",2926,1,17000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.GestureProvider::OnTouchEvent"
 "Renderer","Compositor",2926,1,1208,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.WidgetInputHandlerImpl::DispatchNonBlockingEvent-LatencyInfo.Flow"
@@ -19,3 +20,6 @@
 "Gpu","VizCompositorThread",2926,1,2000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
 "Gpu","VizCompositorThread",2926,1,5000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
 "Gpu","VizCompositorThread",2926,1,8000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
+"Gpu","VizCompositorThread",2926,1,2000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
+"Gpu","VizCompositorThread",2926,1,8000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
+"Gpu","VizCompositorThread",2926,1,2000,"InputLatency.LatencyInfo.Flow.QueuingDelay.Jank.BlockingTasksUs.LatencyInfo.Flow"
diff --git a/test/trace_processor/chrome/scroll_jank_cause_queuing_delay_general_validation.out b/test/trace_processor/chrome/scroll_jank_cause_queuing_delay_general_validation.out
index 7e3393f..daf9c40 100644
--- a/test/trace_processor/chrome/scroll_jank_cause_queuing_delay_general_validation.out
+++ b/test/trace_processor/chrome/scroll_jank_cause_queuing_delay_general_validation.out
@@ -1,3 +1,3 @@
 
 "total","janky_latency_info_non_jank_avg_dur","non_janky_latency_info_non_jank_avg_dur"
-139,6358.208955,6358.208955
+139,6185.000000,6185.000000
diff --git a/test/trace_processor/python/api_integrationtest.py b/test/trace_processor/python/api_integrationtest.py
index 392e28e..9e7bef3 100644
--- a/test/trace_processor/python/api_integrationtest.py
+++ b/test/trace_processor/python/api_integrationtest.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import io
 import os
 import unittest
 
@@ -21,11 +22,11 @@
 
 class TestApi(unittest.TestCase):
 
-  def test_trace_file(self):
+  def test_trace_path(self):
     # Get path to trace_processor_shell and construct TraceProcessor
     tp = TraceProcessor(
-        file_path=os.path.join(os.environ["ROOT_DIR"], 'test', 'data',
-                               'example_android_trace_30s.pb'),
+        trace=os.path.join(os.environ["ROOT_DIR"], 'test', 'data',
+                           'example_android_trace_30s.pb'),
         bin_path=os.environ["SHELL_PATH"])
     qr_iterator = tp.query('select * from slice limit 10')
     dur_result = [
@@ -48,3 +49,51 @@
     self.assertEqual(count, expected_count)
 
     tp.close()
+
+  def test_trace_byteio(self):
+    f = io.BytesIO(
+        b'\n(\n&\x08\x00\x12\x12\x08\x01\x10\xc8\x01\x1a\x0b\x12\t'
+        b'B|200|foo\x12\x0e\x08\x02\x10\xc8\x01\x1a\x07\x12\x05E|200')
+    with TraceProcessor(trace=f, bin_path=os.environ["SHELL_PATH"]) as tp:
+      qr_iterator = tp.query('select * from slice limit 10')
+      res = list(qr_iterator)
+
+      self.assertEqual(len(res), 1)
+
+      row = res[0]
+      self.assertEqual(row.ts, 1)
+      self.assertEqual(row.dur, 1)
+      self.assertEqual(row.name, 'foo')
+
+  def test_trace_file(self):
+    path = os.path.join(os.environ["ROOT_DIR"], 'test', 'data',
+                        'example_android_trace_30s.pb')
+    with open(path, 'rb') as file:
+      with TraceProcessor(trace=file, bin_path=os.environ["SHELL_PATH"]) as tp:
+        qr_iterator = tp.query('select * from slice limit 10')
+        dur_result = [
+            178646, 119740, 58073, 155000, 173177, 20209377, 3589167, 90104,
+            275312, 65313
+        ]
+
+        for num, row in enumerate(qr_iterator):
+          self.assertEqual(row.dur, dur_result[num])
+
+  def test_trace_generator(self):
+
+    def reader_generator():
+      path = os.path.join(os.environ["ROOT_DIR"], 'test', 'data',
+                          'example_android_trace_30s.pb')
+      with open(path, 'rb') as file:
+        yield file.read(1024)
+
+    with TraceProcessor(
+        trace=reader_generator(), bin_path=os.environ["SHELL_PATH"]) as tp:
+      qr_iterator = tp.query('select * from slice limit 10')
+      dur_result = [
+          178646, 119740, 58073, 155000, 173177, 20209377, 3589167, 90104,
+          275312, 65313
+      ]
+
+      for num, row in enumerate(qr_iterator):
+        self.assertEqual(row.dur, dur_result[num])
diff --git a/tools/batch_trace_processor/main.py b/tools/batch_trace_processor/main.py
index 843e5f1..b73f7d9 100644
--- a/tools/batch_trace_processor/main.py
+++ b/tools/batch_trace_processor/main.py
@@ -23,24 +23,26 @@
 import pandas as pd
 import plotille
 
-from perfetto.trace_processor import TraceProcessorException
-
 from perfetto.batch_trace_processor.api import BatchTraceProcessor
-
-
-def prefix_path_column(path, df):
-  df['trace_file_path'] = path
-  return df
+from perfetto.trace_processor import TraceProcessorException
+from typing import List
 
 
 class TpBatchShell(cmd.Cmd):
 
-  def __init__(self, files, batch_tp):
+  def __init__(self, files: List[str], batch_tp: BatchTraceProcessor):
     super().__init__()
     self.files = files
     self.batch_tp = batch_tp
 
-  def do_histogram(self, arg):
+  def do_table(self, arg: str):
+    try:
+      data = self.batch_tp.query_and_flatten(arg)
+      print(data)
+    except TraceProcessorException as ex:
+      logging.error("Query failed: {}".format(ex))
+
+  def do_histogram(self, arg: str):
     try:
       data = self.batch_tp.query_single_result(arg)
       print(plotille.histogram(data))
@@ -48,7 +50,7 @@
     except TraceProcessorException as ex:
       logging.error("Query failed: {}".format(ex))
 
-  def do_vhistogram(self, arg):
+  def do_vhistogram(self, arg: str):
     try:
       data = self.batch_tp.query_single_result(arg)
       print(plotille.hist(data))
@@ -56,7 +58,7 @@
     except TraceProcessorException as ex:
       logging.error("Query failed: {}".format(ex))
 
-  def do_count(self, arg):
+  def do_count(self, arg: str):
     try:
       data = self.batch_tp.query_single_result(arg)
       counts = dict()
@@ -114,9 +116,10 @@
         queries_str = f.read()
 
       queries = [q.strip() for q in queries_str.split(";\n")]
-      out = [batch_tp.query(q) for q in queries if q][-1]
-      res = pd.concat(
-          [prefix_path_column(path, df) for (path, df) in zip(files, out)])
+      for q in queries[:-1]:
+        batch_tp.query(q)
+
+      res = batch_tp.query_and_flatten(queries[-1])
       print(res.to_csv(index=False))
 
     if args.interactive or not args.query_file:
diff --git a/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py b/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py
index 9fe272a..4ae5f14 100644
--- a/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py
+++ b/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py
@@ -13,54 +13,104 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from concurrent.futures import ThreadPoolExecutor
-from dataclasses import dataclass
-from perfetto.trace_processor import TraceProcessor, TraceProcessorException
+
+"""Contains classes for BatchTraceProcessor API."""
+
+import concurrent.futures as cf
+import dataclasses as dc
+from typing import Any, Callable, Dict, Tuple, Union, List
+
+import pandas as pd
+
+from perfetto.trace_processor import LoadableTrace
+from perfetto.trace_processor import TraceProcessor
+from perfetto.trace_processor import TraceProcessorException
 
 
-@dataclass
-class TpArg:
+@dc.dataclass
+class _TpArg:
   bin_path: str
   verbose: bool
-  file: str
+  trace: LoadableTrace
+
+
+@dc.dataclass
+class BatchLoadableTrace:
+  trace: LoadableTrace
+  args: Dict[str, str]
 
 
 class BatchTraceProcessor:
-  """BatchTraceProcessor is the blessed way of running ad-hoc queries on
-  Python across many Perfetto traces.
+  """Run ad-hoc SQL queries across many Perfetto traces.
 
   Usage:
-    with BatchTraceProcessor(file_paths=files) as btp:
+    with BatchTraceProcessor(traces) as btp:
       dfs = btp.query('select * from slice')
       for df in dfs:
         print(df)
   """
 
-  def __init__(self, file_paths, bin_path=None, verbose=False):
-    """Creates a batch trace processor instance: the blessed way of running
-    ad-hoc queries on Python across many traces.
+  def __init__(self,
+               traces: List[Union[LoadableTrace, BatchLoadableTrace]],
+               bin_path: str = None,
+               verbose: bool = False):
+    """Creates a batch trace processor instance.
+
+    BatchTraceProcessor is the blessed way of running ad-hoc queries in
+    Python across many traces.
 
     Args:
-      file_paths: List of trace file paths to load into this batch trace
-        processor instance.
+      traces: A list of traces to load into this instance. Each object in
+        the list can be one of the following types:
+        1) path to a trace file to open and read
+        2) a file like object (file, io.BytesIO or similar) to read
+        3) a generator yielding bytes
+        4) a BatchLoadableTrace object; this is basically a wrapper around
+           one of the above types plus an args field; see |query_and_flatten|
+           for the motivation for the args field.
       bin_path: Optional path to a trace processor shell binary to use to
         load the traces.
       verbose: Optional flag indiciating whether verbose trace processor
         output should be printed to stderr.
     """
-    self.executor = ThreadPoolExecutor()
-    self.paths = file_paths
-    self.closed = False
 
-    def create_tp(arg):
+    def _create_batch_trace(x: Union[LoadableTrace, BatchLoadableTrace]
+                           ) -> BatchLoadableTrace:
+      if isinstance(x, BatchLoadableTrace):
+        return x
+      return BatchLoadableTrace(trace=x, args={})
+
+    def create_tp(arg: _TpArg) -> TraceProcessor:
       return TraceProcessor(
-          file_path=arg.file, bin_path=arg.bin_path, verbose=arg.verbose)
+          trace=arg.trace, bin_path=arg.bin_path, verbose=arg.verbose)
 
-    tp_args = [TpArg(bin_path, verbose, file) for file in file_paths]
+    self.tps = None
+    self.closed = False
+    self.executor = cf.ThreadPoolExecutor()
+
+    batch_traces = [_create_batch_trace(t) for t in traces]
+    self.args = [t.args for t in batch_traces]
+
+    tp_args = [_TpArg(bin_path, verbose, t.trace) for t in batch_traces]
     self.tps = list(self.executor.map(create_tp, tp_args))
 
-  def query(self, sql):
-    """Executes the provided SQL statement in parallel across all the traces.
+  def metric(self, metrics: List[str]):
+    """Computes the provided metrics.
+
+    The computation happens in parallel across all the traces.
+
+    Args:
+      metrics: A list of valid metrics as defined in TraceMetrics
+
+    Returns:
+      A list of TraceMetric protos (one for each trace).
+    """
+    return self.execute(lambda tp: tp.metric(metrics))
+
+  def query(self, sql: str):
+    """Executes the provided SQL statement (returning a single row).
+
+    The execution happens in parallel across all the traces.
 
     Args:
       sql: The SQL statement to execute.
@@ -72,18 +122,51 @@
     Raises:
       TraceProcessorException: An error occurred running the query.
     """
-    return self.__execute_on_tps(lambda tp: tp.query(sql).as_pandas_dataframe())
+    return self.execute(lambda tp: tp.query(sql).as_pandas_dataframe())
 
-  def query_single_result(self, sql):
-    """Executes the provided SQL statement (which should return a single row)
-    in parallel across all the traces.
+  def query_and_flatten(self, sql: str):
+    """Executes the provided SQL statement and flattens the result.
+
+    The execution happens in parallel across all the traces and the
+    resulting Pandas dataframes are flattened into a single dataframe.
+
+    Args:
+      sql: The SQL statement to execute.
+
+    Returns:
+      A concatenated Pandas dataframe containing the result of executing the
+      query across all the traces.
+
+      If |BatchLoadableTrace| objects were passed to the constructor, the
+      contents of the |args| dictionary will also be emitted as extra columns
+      (key being column name, value being the value in the dataframe).
+
+      For example:
+        traces = [BatchLoadableTrace(trace='/tmp/path', args={"foo": "bar"})]
+        with BatchTraceProcessor(traces) as btp:
+          df = btp.query_and_flatten('select count(1) as cnt from slice')
+
+      Then df will look like this:
+        cnt             foo
+        100             bar
+
+    Raises:
+      TraceProcessorException: An error occurred running the query.
+    """
+    return self.execute_and_flatten(lambda tp: tp.query(sql).
+                                    as_pandas_dataframe())
+
+  def query_single_result(self, sql: str):
+    """Executes the provided SQL statement (returning a single row).
+
+    The execution happens in parallel across all the traces.
 
     Args:
       sql: The SQL statement to execute. This statement should return exactly
         one row on any trace.
 
     Returns:
-      A list of values with the result of executing the query (one per ftrace).
+      A list of values with the result of executing the query (one per trace).
 
     Raises:
       TraceProcessorException: An error occurred running the query or more than
@@ -101,31 +184,73 @@
 
       return df.iloc[0, 0]
 
-    return self.__execute_on_tps(query_single_result_inner)
+    return self.execute(query_single_result_inner)
+
+  def execute(self, fn: Callable[[TraceProcessor], Any]) -> List[Any]:
+    """Executes the provided function.
+
+    The execution happens in parallel across all the trace processor instances
+    owned by this object.
+
+    Args:
+      fn: The function to execute.
+
+    Returns:
+      A list of values with the result of executing the function (one per
+      trace).
+    """
+    return list(self.executor.map(fn, self.tps))
+
+  def execute_and_flatten(self, fn: Callable[[TraceProcessor], pd.DataFrame]
+                         ) -> pd.DataFrame:
+    """Executes the provided function and flattens the result.
+
+    The execution happens in parallel across all the trace processor
+    instances owned by this object and the returned Pandas dataframes are
+    flattened into a single dataframe.
+
+    Args:
+      fn: The function to execute which returns a Pandas dataframe.
+
+    Returns:
+      A Pandas dataframe containing the result of executing the query across all
+      the traces. Extra columns containing the file path and args will
+      be added to the dataframe (see |query_and_flatten| for details).
+    """
+
+    def wrapped(pair: Tuple[TraceProcessor, BatchLoadableTrace]):
+      (tp, args) = pair
+      df = fn(tp)
+      for key, value in args.items():
+        df[key] = value
+      return df
+
+    df = pd.concat(list(self.executor.map(wrapped, zip(self.tps, self.args))))
+    return df.reset_index(drop=True)
 
   def close(self):
-    """Closes this batch trace processor instance: this closes all spawned
-    trace processor instances, releasing all the memory and resources those
-    instances take.
+    """Closes this batch trace processor instance.
+
+    This closes all spawned trace processor instances, releasing all the memory
+    and resources those instances take.
 
     No further calls to other methods in this class should be made after
     calling this method.
     """
     if self.closed:
       return
-
-    self.executor.map(lambda tp: tp.close(), self.tps)
+    self.closed = True
     self.executor.shutdown()
 
-    self.closed = True
-
-  def __execute_on_tps(self, fn):
-    return list(self.executor.map(fn, self.tps))
+    if self.tps:
+      for tp in self.tps:
+        tp.close()
 
   def __enter__(self):
     return self
 
-  def __exit__(self, _, __, ___):
+  def __exit__(self, a, b, c):
+    del a, b, c  # Unused.
     self.close()
     return False
 
diff --git a/tools/open_trace_in_ui b/tools/open_trace_in_ui
index c2360bb..0ec63b4 100755
--- a/tools/open_trace_in_ui
+++ b/tools/open_trace_in_ui
@@ -50,32 +50,41 @@
   print(colors + msg + ANSI.END)
 
 
-def open_trace_in_browser(path):
+def open_trace(path, open_browser):
   # We reuse the HTTP+RPC port because it's the only one allowed by the CSP.
   PORT = 9001
   os.chdir(os.path.dirname(path))
   fname = os.path.basename(path)
   socketserver.TCPServer.allow_reuse_address = True
   with socketserver.TCPServer(('127.0.0.1', PORT), HttpHandler) as httpd:
-    webbrowser.open_new_tab(
-        'https://ui.perfetto.dev/#!/?url=http://127.0.0.1:%d/%s' %
-        (PORT, fname))
+    if open_browser:
+      webbrowser.open_new_tab(
+          'https://ui.perfetto.dev/#!/?url=http://127.0.0.1:%d/%s' %
+          (PORT, fname))
+    else:
+      print('Open URL in browser: '
+            'https://ui.perfetto.dev/#!/?url=http://127.0.0.1:%d/%s' %
+            (PORT, fname))
+
     while httpd.__dict__.get('last_request') != '/' + fname:
       httpd.handle_request()
 
 
 def main():
   examples = '\n'.join([
-      ANSI.BOLD + 'Usage:' + ANSI.END, '  -i path/trace_file_name'
+      ANSI.BOLD + 'Usage:' + ANSI.END, '  -i path/trace_file_name [-n]'
   ])
   parser = argparse.ArgumentParser(
       epilog=examples, formatter_class=argparse.RawTextHelpFormatter)
 
   help = 'Input trace filename'
   parser.add_argument('-i', '--trace', help=help)
+  parser.add_argument('-n', '--no-open-browser', action='store_true',
+                      default=False)
 
   args = parser.parse_args()
   trace_file = args.trace
+  open_browser = not args.no_open_browser
 
   if trace_file is None:
     prt('Please specify trace file name with -i/--trace argument', ANSI.RED)
@@ -85,7 +94,7 @@
     sys.exit(1)
 
   prt('Opening the trace (%s) in the browser' % trace_file)
-  open_trace_in_browser(trace_file)
+  open_trace(trace_file, open_browser)
 
 
 if __name__ == '__main__':