Merge "trace-processor: Add extensions once"
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 8378ac1..65ee8a9 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -22,8 +22,8 @@
   def long_line_sources(x):
     return input.FilterSourceFile(
         x,
-        white_list=".*",
-        black_list=[
+        allow_list=".*",
+        block_list=[
             'Android[.]bp', '.*[.]json$', '.*[.]sql$', '.*[.]out$',
             'test/trace_processor/.*/index$', '.*\bBUILD$', 'WORKSPACE',
             '.*/Makefile$', '/perfetto_build_flags.h$'
@@ -44,7 +44,7 @@
   results += CheckAndroidBlueprint(input, output)
   results += CheckBinaryDescriptors(input, output)
   results += CheckMergedTraceConfigProto(input, output)
-  results += CheckWhitelist(input, output)
+  results += CheckProtoEventList(input, output)
   results += CheckBannedCpp(input, output)
   return results
 
@@ -63,7 +63,7 @@
   # If no GN files were modified, bail out.
   def build_file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=('.*BUILD[.]gn$', '.*[.]gni$', 'BUILD\.extras', tool))
+        x, allow_list=('.*BUILD[.]gn$', '.*[.]gni$', 'BUILD\.extras', tool))
 
   if not input_api.AffectedSourceFiles(build_file_filter):
     return []
@@ -81,7 +81,7 @@
   # If no GN files were modified, bail out.
   def build_file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=('.*BUILD[.]gn$', '.*[.]gni$', tool))
+        x, allow_list=('.*BUILD[.]gn$', '.*[.]gni$', tool))
 
   if not input_api.AffectedSourceFiles(build_file_filter):
     return []
@@ -98,7 +98,7 @@
 
   def file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=['.*[.]cc$', '.*[.]h$', tool])
+        x, allow_list=['.*[.]cc$', '.*[.]h$', tool])
 
   if not input_api.AffectedSourceFiles(file_filter):
     return []
@@ -134,7 +134,7 @@
   ]
 
   def file_filter(x):
-    return input_api.FilterSourceFile(x, white_list=[r'.*\.h$', r'.*\.cc$'])
+    return input_api.FilterSourceFile(x, allow_list=[r'.*\.h$', r'.*\.cc$'])
 
   errors = []
   for f in input_api.AffectedSourceFiles(file_filter):
@@ -151,7 +151,7 @@
   tool = 'tools/check_include_violations'
 
   def file_filter(x):
-    return input_api.FilterSourceFile(x, white_list=['include/.*[.]h$', tool])
+    return input_api.FilterSourceFile(x, allow_list=['include/.*[.]h$', tool])
 
   if not input_api.AffectedSourceFiles(file_filter):
     return []
@@ -165,7 +165,7 @@
 
   def file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=['protos/perfetto/.*[.]proto$', '.*[.]h', tool])
+        x, allow_list=['protos/perfetto/.*[.]proto$', '.*[.]h', tool])
 
   if not input_api.AffectedSourceFiles(file_filter):
     return []
@@ -182,7 +182,7 @@
 
   def build_file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=['protos/perfetto/.*[.]proto$', tool])
+        x, allow_list=['protos/perfetto/.*[.]proto$', tool])
 
   if not input_api.AffectedSourceFiles(build_file_filter):
     return []
@@ -195,17 +195,17 @@
   return []
 
 
-# Prevent removing or changing lines in event_whitelist.
-def CheckWhitelist(input_api, output_api):
+# Prevent removing or changing lines in event_list.
+def CheckProtoEventList(input_api, output_api):
   for f in input_api.AffectedFiles():
-    if f.LocalPath() != 'tools/ftrace_proto_gen/event_whitelist':
+    if f.LocalPath() != 'tools/ftrace_proto_gen/event_list':
       continue
     if any((not new_line.startswith('removed')) and new_line != old_line
            for old_line, new_line in itertools.izip(f.OldContents(),
                                                     f.NewContents())):
       return [
           output_api.PresubmitError(
-              'event_whitelist only has two supported changes: '
+              'event_list only has two supported changes: '
               'appending a new line, and replacing a line with removed.')
       ]
   return []
@@ -216,7 +216,7 @@
 
   def file_filter(x):
     return input_api.FilterSourceFile(
-        x, white_list=['protos/perfetto/.*[.]proto$', tool])
+        x, allow_list=['protos/perfetto/.*[.]proto$', tool])
 
   if not input_api.AffectedSourceFiles(file_filter):
     return []
diff --git a/docs/contributing/common-tasks.md b/docs/contributing/common-tasks.md
index 31987c8..f0e99ef 100644
--- a/docs/contributing/common-tasks.md
+++ b/docs/contributing/common-tasks.md
@@ -6,7 +6,7 @@
 
 1. Find the `format` file for your event. The location of the file depends where `tracefs` is mounted but can often be found at `/sys/kernel/debug/tracing/events/EVENT_GROUP/EVENT_NAME/format`.
 2. Copy the format file into the codebase at `src/traced/probes/ftrace/test/data/synthetic/events/EVENT_GROUP/EVENT_NAME/format`.
-3. Add the event to [tools/ftrace_proto_gen/event_whitelist](/tools/ftrace_proto_gen/event_whitelist).
+3. Add the event to [tools/ftrace_proto_gen/event_list](/tools/ftrace_proto_gen/event_list).
 4. Run `tools/run_ftrace_proto_gen`. This will update `protos/perfetto/trace/ftrace/ftrace_event.proto` and `protos/perfetto/trace/ftrace/GROUP_NAME.proto`.
 5. Run `tools/gen_all out/YOUR_BUILD_DIRECTORY`. This will update `src/traced/probes/ftrace/event_info.cc` and `protos/perfetto/trace/perfetto_trace.proto`.
 6. If special handling in `trace_processor` is desired update [src/trace_processor/importers/ftrace/ftrace_parser.cc](/src/trace_processor/importers/ftrace/ftrace_parser.cc) to parse the event.
diff --git a/docs/data-sources/native-heap-profiler.md b/docs/data-sources/native-heap-profiler.md
index f582dc3..e0fbe7b 100644
--- a/docs/data-sources/native-heap-profiler.md
+++ b/docs/data-sources/native-heap-profiler.md
@@ -179,8 +179,8 @@
 Profiling requests for non-profileable/debuggable processes will result in an
 empty profile.
 
-On userdebug builds, all processes except for a small blacklist of critical
-services can be profiled (to find the blacklist, look for
+On userdebug builds, all processes except for a small set of critical
+services can be profiled (to find the set of disallowed targets, look for
 `never_profile_heap` in [heapprofd.te](
 https://cs.android.com/android/platform/superproject/+/master:system/sepolicy/private/heapprofd.te?q=never_profile_heap).
 This restriction can be lifted by disabling SELinux by running
diff --git a/docs/design-docs/life-of-a-tracing-session.md b/docs/design-docs/life-of-a-tracing-session.md
index 66690ec..79bdeb6 100644
--- a/docs/design-docs/life-of-a-tracing-session.md
+++ b/docs/design-docs/life-of-a-tracing-session.md
@@ -51,7 +51,7 @@
     buffer.
 18. The service will check if the given chunk, identified by the tuple
     `{ProducerID (unspoofable), WriterID, ChunkID}` is still present in the
-    trace buffer and if so will proceed to patch it (% sanity checks).
+    trace buffer and if so will proceed to patch it (modulo some checks).
 19. The consumer sends a [`FlushRequest`](/protos/perfetto/ipc/consumer_port.proto#52)
     to the service, asking it commit all data on flight in the trace buffers.
 20. The service, in turn, issues a
diff --git a/gn/BUILD.gn b/gn/BUILD.gn
index 7babb1b..30d8469 100644
--- a/gn/BUILD.gn
+++ b/gn/BUILD.gn
@@ -194,7 +194,7 @@
 
 # Full protobuf is just for host tools .No binary shipped on device should
 # depend on this.
-whitelisted_protobuf_full_deps = [
+protobuf_full_deps_allowlist = [
   "../src/ipc/protoc_plugin:*",
   "../src/protozero/protoc_plugin:*",
   "../src/trace_processor:trace_processor_shell",
@@ -216,7 +216,7 @@
 # protoc compiler library, it's used for building protoc plugins and by
 # trace_processor_shell to dynamically load .proto files for metrics.
 group("protoc_lib") {
-  visibility = whitelisted_protobuf_full_deps
+  visibility = protobuf_full_deps_allowlist
   if (current_toolchain == host_toolchain) {
     if (perfetto_use_system_protobuf) {
       public_configs = [
@@ -231,7 +231,7 @@
 }
 
 group("protobuf_full") {
-  visibility = whitelisted_protobuf_full_deps
+  visibility = protobuf_full_deps_allowlist
   if (current_toolchain == host_toolchain) {
     if (perfetto_use_system_protobuf) {
       public_configs = [ ":system_protobuf" ]
diff --git a/gn/standalone/sanitizers/BUILD.gn b/gn/standalone/sanitizers/BUILD.gn
index c015d3e..d3374de 100644
--- a/gn/standalone/sanitizers/BUILD.gn
+++ b/gn/standalone/sanitizers/BUILD.gn
@@ -36,11 +36,7 @@
   cflags = []
   defines = []
   if (using_sanitizer) {
-    blacklist_path_ = rebase_path("blacklist.txt", root_build_dir)
-    cflags += [
-      "-fno-omit-frame-pointer",
-      "-fsanitize-blacklist=$blacklist_path_",
-    ]
+    cflags += [ "-fno-omit-frame-pointer" ]
   }
 
   if (is_asan) {
diff --git a/gn/standalone/sanitizers/blacklist.txt b/gn/standalone/sanitizers/blacklist.txt
deleted file mode 100644
index e0e315b..0000000
--- a/gn/standalone/sanitizers/blacklist.txt
+++ /dev/null
@@ -1 +0,0 @@
-# The rules in this file are only applied at compile time.
diff --git a/perfetto.rc b/perfetto.rc
index a33bd03..44e9ebd 100644
--- a/perfetto.rc
+++ b/perfetto.rc
@@ -25,8 +25,8 @@
     class late_start
     disabled
     user nobody
-    # Despite the "log" group below, traced_probes is whitelisted for log read
-    # access only on userdebug/eng via selinux (see traced_probes.te).
+    # Despite the "log" group below, traced_probes is allowed to read logs
+    # only on userdebug/eng via selinux (see traced_probes.te).
     group nobody readproc log
     writepid /dev/cpuset/system-background/tasks
     # Clean up procfs configuration even if traced_probes crashes
diff --git a/protos/perfetto/config/perfetto_config.proto b/protos/perfetto/config/perfetto_config.proto
index 6ed4bdc..633c3ba 100644
--- a/protos/perfetto/config/perfetto_config.proto
+++ b/protos/perfetto/config/perfetto_config.proto
@@ -702,23 +702,23 @@
   // If unset, an implementation-defined default is used.
   optional uint32 ring_buffer_pages = 3;
 
-  // Process ID (TGID) whitelist. If this list is not empty, only matching
-  // samples will be retained. If multiple whitelists and blacklists are
+  // Process ID (TGID) allowlist. If this list is not empty, only matching
+  // samples will be retained. If multiple allow/deny-lists are
   // specified by the config, then all of them are evaluated for each sampled
   // process.
   repeated int32 target_pid = 4;
 
-  // Command line whitelist, matched against the
+  // Command line allowlist, matched against the
   // /proc/<pid>/cmdline (not the comm string), with both sides being
   // "normalized". Normalization is as follows: (1) trim everything beyond the
   // first null or "@" byte; (2) if the string contains forward slashes, trim
   // everything up to and including the last one.
   repeated string target_cmdline = 5;
 
-  // PID blacklist.
+  // List of excluded pids.
   repeated int32 exclude_pid = 6;
 
-  // Command line blacklist. Normalized in the same way as |target_cmdline|.
+  // List of excluded cmdlines. Normalized in the same way as |target_cmdline|.
   repeated string exclude_cmdline = 7;
 
   ////////////////////
diff --git a/protos/perfetto/config/profiling/perf_event_config.proto b/protos/perfetto/config/profiling/perf_event_config.proto
index f5b186c..3a73178 100644
--- a/protos/perfetto/config/profiling/perf_event_config.proto
+++ b/protos/perfetto/config/profiling/perf_event_config.proto
@@ -40,23 +40,23 @@
   // If unset, an implementation-defined default is used.
   optional uint32 ring_buffer_pages = 3;
 
-  // Process ID (TGID) whitelist. If this list is not empty, only matching
-  // samples will be retained. If multiple whitelists and blacklists are
+  // Process ID (TGID) allowlist. If this list is not empty, only matching
+  // samples will be retained. If multiple allow/deny-lists are
   // specified by the config, then all of them are evaluated for each sampled
   // process.
   repeated int32 target_pid = 4;
 
-  // Command line whitelist, matched against the
+  // Command line allowlist, matched against the
   // /proc/<pid>/cmdline (not the comm string), with both sides being
   // "normalized". Normalization is as follows: (1) trim everything beyond the
   // first null or "@" byte; (2) if the string contains forward slashes, trim
   // everything up to and including the last one.
   repeated string target_cmdline = 5;
 
-  // PID blacklist.
+  // List of excluded pids.
   repeated int32 exclude_pid = 6;
 
-  // Command line blacklist. Normalized in the same way as |target_cmdline|.
+  // List of excluded cmdlines. Normalized in the same way as |target_cmdline|.
   repeated string exclude_cmdline = 7;
 
   ////////////////////
diff --git a/protos/perfetto/trace/perfetto_trace.proto b/protos/perfetto/trace/perfetto_trace.proto
index cc44523..89a4aeb 100644
--- a/protos/perfetto/trace/perfetto_trace.proto
+++ b/protos/perfetto/trace/perfetto_trace.proto
@@ -702,23 +702,23 @@
   // If unset, an implementation-defined default is used.
   optional uint32 ring_buffer_pages = 3;
 
-  // Process ID (TGID) whitelist. If this list is not empty, only matching
-  // samples will be retained. If multiple whitelists and blacklists are
+  // Process ID (TGID) allowlist. If this list is not empty, only matching
+  // samples will be retained. If multiple allow/deny-lists are
   // specified by the config, then all of them are evaluated for each sampled
   // process.
   repeated int32 target_pid = 4;
 
-  // Command line whitelist, matched against the
+  // Command line allowlist, matched against the
   // /proc/<pid>/cmdline (not the comm string), with both sides being
   // "normalized". Normalization is as follows: (1) trim everything beyond the
   // first null or "@" byte; (2) if the string contains forward slashes, trim
   // everything up to and including the last one.
   repeated string target_cmdline = 5;
 
-  // PID blacklist.
+  // List of excluded pids.
   repeated int32 exclude_pid = 6;
 
-  // Command line blacklist. Normalized in the same way as |target_cmdline|.
+  // List of excluded cmdlines. Normalized in the same way as |target_cmdline|.
   repeated string exclude_cmdline = 7;
 
   ////////////////////
diff --git a/src/perfetto_cmd/perfetto_config.descriptor.h b/src/perfetto_cmd/perfetto_config.descriptor.h
index 0769940..4d75326 100644
--- a/src/perfetto_cmd/perfetto_config.descriptor.h
+++ b/src/perfetto_cmd/perfetto_config.descriptor.h
@@ -27,7 +27,7 @@
 // SHA1(tools/gen_binary_descriptors)
 // 6deed7c8efd4c9f8450c38a2560e8844bbbd6ea8
 // SHA1(protos/perfetto/config/perfetto_config.proto)
-// 36ae9665c14a73924386606a37f83a57648387bb
+// 3433b20df9aecd9b08f84ef1a821b2867c032354
 
 // This is the proto PerfettoConfig encoded as a ProtoFileDescriptor to allow
 // for reflection without libprotobuf full/non-lite protos.
diff --git a/src/profiling/perf/event_config.h b/src/profiling/perf/event_config.h
index 27e8915..33c7f8e 100644
--- a/src/profiling/perf/event_config.h
+++ b/src/profiling/perf/event_config.h
@@ -32,8 +32,8 @@
 namespace perfetto {
 namespace profiling {
 
-// Parsed whitelist/blacklist for filtering samples.
-// An empty whitelist set means that all targets are allowed.
+// Parsed allow/deny-list for filtering samples.
+// An empty filter set means that all targets are allowed.
 struct TargetFilter {
   base::FlatSet<std::string> cmdlines;
   base::FlatSet<std::string> exclude_cmdlines;
@@ -91,7 +91,7 @@
   // *each* per-cpu buffer.
   const uint32_t samples_per_tick_limit_;
 
-  // Parsed whitelist/blacklist for filtering samples.
+  // Parsed allow/deny-list for filtering samples.
   const TargetFilter target_filter_;
 
   // Timeout for proc-fd lookup.
diff --git a/src/profiling/perf/perf_producer.cc b/src/profiling/perf/perf_producer.cc
index e0dde42..982846a 100644
--- a/src/profiling/perf/perf_producer.cc
+++ b/src/profiling/perf/perf_producer.cc
@@ -89,13 +89,13 @@
   bool reject_cmd = false;
   std::string cmdline;
   if (GetCmdlineForPID(pid, &cmdline)) {  // normalized form
-    // reject if absent from non-empty whitelist, or present in blacklist
+    // reject if absent from non-empty filters, or if excluded.
     reject_cmd = (filter.cmdlines.size() && !filter.cmdlines.count(cmdline)) ||
                  filter.exclude_cmdlines.count(cmdline);
   } else {
     PERFETTO_DLOG("Failed to look up cmdline for pid [%d]",
                   static_cast<int>(pid));
-    // reject only if there's a whitelist present
+    // reject only if there's a filter present
     reject_cmd = filter.cmdlines.size() > 0;
   }
 
@@ -427,7 +427,7 @@
       PERFETTO_DLOG("New pid: [%d]", static_cast<int>(pid));
 
       // Check whether samples for this new process should be
-      // dropped due to the target whitelist/blacklist.
+      // dropped due to the target filtering.
       const TargetFilter& filter = ds->event_config.filter();
       if (ShouldRejectDueToFilter(pid, filter)) {
         process_state = ProcessTrackingStatus::kRejected;
diff --git a/src/protozero/protoc_plugin/protozero_plugin.cc b/src/protozero/protoc_plugin/protozero_plugin.cc
index 874ee61..1ac9f3a 100644
--- a/src/protozero/protoc_plugin/protozero_plugin.cc
+++ b/src/protozero/protoc_plugin/protozero_plugin.cc
@@ -299,7 +299,7 @@
     if (source_->weak_dependency_count() > 0)
       Abort("Weak imports are not supported.");
 
-    // Sanity check. Collect public imports (of collected imports) in DFS order.
+    // Validation: collect public imports (of collected imports) in DFS order.
     // Visibilty for current proto:
     // - all imports listed in current proto,
     // - public imports of everything imported (recursive).
@@ -326,8 +326,8 @@
     }
 
     // Collect descriptors of messages and enums used in current proto.
-    // It will be used to generate necessary forward declarations and performed
-    // sanity check guarantees that everything lays in the same namespace.
+    // It will be used to generate necessary forward declarations and
+    // check that everything lies in the same namespace.
     for (const Descriptor* message : messages_) {
       for (int i = 0; i < message->field_count(); ++i) {
         const FieldDescriptor* field = message->field(i);
diff --git a/src/trace_processor/db/table.h b/src/trace_processor/db/table.h
index c49a851..45d96e4 100644
--- a/src/trace_processor/db/table.h
+++ b/src/trace_processor/db/table.h
@@ -211,6 +211,9 @@
   // Returns an iterator into the Table.
   Iterator IterateRows() const { return Iterator(this); }
 
+  // Creates a copy of this table.
+  Table Copy() const;
+
   uint32_t row_count() const { return row_count_; }
   const std::vector<RowMap>& row_maps() const { return row_maps_; }
 
@@ -226,7 +229,6 @@
  private:
   friend class Column;
 
-  Table Copy() const;
   Table CopyExceptRowMaps() const;
 };
 
diff --git a/src/trace_processor/dynamic/experimental_slice_layout_generator.cc b/src/trace_processor/dynamic/experimental_slice_layout_generator.cc
index 132d7dc..a111016 100644
--- a/src/trace_processor/dynamic/experimental_slice_layout_generator.cc
+++ b/src/trace_processor/dynamic/experimental_slice_layout_generator.cc
@@ -95,7 +95,36 @@
 
   StringPool::Id filter_id =
       string_pool_->InternString(base::StringView(filter_string));
-  return AddLayoutColumn(*slice_table_, selected_tracks, filter_id);
+
+  // Try and find the table in the cache.
+  auto it = layout_table_cache_.find(filter_id);
+  if (it != layout_table_cache_.end()) {
+    return std::unique_ptr<Table>(new Table(it->second.Copy()));
+  }
+
+  // Find all the slices for the tracks we want to filter and create a RowMap
+  // out of them.
+  // TODO(lalitm): Update this to use iterator (as this code will be slow after
+  // the event table is implemented).
+  // TODO(lalitm): consider generalising this by adding OR constraint support to
+  // Constraint and Table::Filter. We definitely want to wait until we have more
+  // usecases before implementing that though because it will be a significant
+  // amount of work.
+  RowMap rm;
+  for (uint32_t i = 0; i < slice_table_->row_count(); ++i) {
+    if (selected_tracks.count(slice_table_->track_id()[i]) > 0) {
+      rm.Insert(i);
+    }
+  }
+
+  // Apply the row map to the table to cut down on the number of rows we have to
+  // go through.
+  Table filtered_table = slice_table_->Apply(std::move(rm));
+
+  // Compute the table and add it to the cache for future use.
+  Table layout_table = ComputeLayoutTable(filtered_table, filter_id);
+  auto res = layout_table_cache_.emplace(filter_id, std::move(layout_table));
+  return std::unique_ptr<Table>(new Table(res.first->second.Copy()));
 }
 
 // Build up a table of slice id -> root slice id by observing each
@@ -143,12 +172,13 @@
 // 3. Go though each slice and give it a layout_depth by summing it's
 //    current depth and the root layout_depth of the stalactite it belongs to.
 //
-std::unique_ptr<Table> ExperimentalSliceLayoutGenerator::AddLayoutColumn(
+Table ExperimentalSliceLayoutGenerator::ComputeLayoutTable(
     const Table& table,
-    const std::set<TrackId>& selected,
     StringPool::Id filter_id) {
-  const auto& track_id_col =
-      *table.GetTypedColumnByName<tables::TrackTable::Id>("track_id");
+  std::map<tables::SliceTable::Id, GroupInfo> groups;
+  // Map of id -> root_id
+  std::map<tables::SliceTable::Id, tables::SliceTable::Id> id_map;
+
   const auto& id_col = *table.GetIdColumnByName<tables::SliceTable::Id>("id");
   const auto& parent_id_col =
       *table.GetTypedColumnByName<base::Optional<tables::SliceTable::Id>>(
@@ -157,19 +187,11 @@
   const auto& ts_col = *table.GetTypedColumnByName<int64_t>("ts");
   const auto& dur_col = *table.GetTypedColumnByName<int64_t>("dur");
 
-  std::map<tables::SliceTable::Id, GroupInfo> groups;
-  // Map of id -> root_id
-  std::map<tables::SliceTable::Id, tables::SliceTable::Id> id_map;
-
   // Step 1:
   // Find the bounding box (start ts, end ts, and max depth) for each group
-  // TODO(lalitm): Update this to use iterator (will be slow after event table)
+  // TODO(lalitm): Update this to use iterator (as this code will be slow after
+  // the event table is implemented)
   for (uint32_t i = 0; i < table.row_count(); ++i) {
-    TrackId track_id = track_id_col[i];
-    if (selected.count(track_id) == 0) {
-      continue;
-    }
-
     tables::SliceTable::Id id = id_col[i];
     base::Optional<tables::SliceTable::Id> parent_id = parent_id_col[i];
     uint32_t depth = depth_col[i];
@@ -259,31 +281,20 @@
       new NullableVector<StringPool::Id>());
 
   for (uint32_t i = 0; i < table.row_count(); ++i) {
-    TrackId track_id = track_id_col[i];
     tables::SliceTable::Id id = id_col[i];
     uint32_t depth = depth_col[i];
-    if (selected.count(track_id) == 0) {
-      // Don't care about depth for slices from non-selected tracks:
-      layout_depth_column->Append(0);
-      // We (ab)use this column to also filter out all the slices we don't care
-      // about by giving it a different value.
-      filter_column->Append(empty_string_id_);
-    } else {
-      // Each slice depth is it's current slice depth + root slice depth of the
-      // group:
-      layout_depth_column->Append(depth + groups.at(id_map[id]).layout_depth);
-      // We must set this to the value we got in the constraint to ensure our
-      // rows are not filtered out:
-      filter_column->Append(filter_id);
-    }
+    // Each slice depth is its current slice depth + root slice depth of the
+    // group:
+    layout_depth_column->Append(depth + groups.at(id_map[id]).layout_depth);
+    // We must set this to the value we got in the constraint to ensure our
+    // rows are not filtered out:
+    filter_column->Append(filter_id);
   }
-
-  return std::unique_ptr<Table>(new Table(
-      table
-          .ExtendWithColumn("layout_depth", std::move(layout_depth_column),
-                            TypedColumn<int64_t>::default_flags())
-          .ExtendWithColumn("filter_track_ids", std::move(filter_column),
-                            TypedColumn<StringPool::Id>::default_flags())));
+  return table
+      .ExtendWithColumn("layout_depth", std::move(layout_depth_column),
+                        TypedColumn<int64_t>::default_flags())
+      .ExtendWithColumn("filter_track_ids", std::move(filter_column),
+                        TypedColumn<StringPool::Id>::default_flags());
 }
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/dynamic/experimental_slice_layout_generator.h b/src/trace_processor/dynamic/experimental_slice_layout_generator.h
index 3f5e6ff..4b033f7 100644
--- a/src/trace_processor/dynamic/experimental_slice_layout_generator.h
+++ b/src/trace_processor/dynamic/experimental_slice_layout_generator.h
@@ -43,14 +43,16 @@
                                       const std::vector<Order>&) override;
 
  private:
-  std::unique_ptr<Table> AddLayoutColumn(const Table& table,
-                                         const std::set<TrackId>& selected,
-                                         StringPool::Id filter_id);
+  Table ComputeLayoutTable(const Table& table, StringPool::Id filter_id);
   tables::SliceTable::Id InsertSlice(
       std::map<tables::SliceTable::Id, tables::SliceTable::Id>& id_map,
       tables::SliceTable::Id id,
       base::Optional<tables::SliceTable::Id> parent_id);
 
+  // TODO(lalitm): remove this cache and move to having explicitly scoped
+  // lifetimes of dynamic tables.
+  std::unordered_map<StringId, Table> layout_table_cache_;
+
   StringPool* string_pool_;
   const tables::SliceTable* slice_table_;
   const StringPool::Id empty_string_id_;
diff --git a/src/trace_processor/importers/common/clock_tracker.cc b/src/trace_processor/importers/common/clock_tracker.cc
index 4f52029..1838b2a 100644
--- a/src/trace_processor/importers/common/clock_tracker.cc
+++ b/src/trace_processor/importers/common/clock_tracker.cc
@@ -251,8 +251,8 @@
     // And use that to retrieve the corresponding time in the next clock domain.
     // The snapshot id must exist in the target clock domain. If it doesn't
     // either the hash logic or the pathfinding logic are bugged.
-    // This can also happen if the sanity checks in AddSnapshot fail and we
-    // skip part of the snapshot.
+    // This can also happen if the checks in AddSnapshot fail and we skip part
+    // of the snapshot.
     const ClockSnapshots& next_snap = next_clock->GetSnapshot(hash);
 
     // Using std::lower_bound because snapshot_ids is sorted, so we can do
diff --git a/src/trace_processor/importers/proto/args_table_utils.h b/src/trace_processor/importers/proto/args_table_utils.h
index db06732..88e00b2 100644
--- a/src/trace_processor/importers/proto/args_table_utils.h
+++ b/src/trace_processor/importers/proto/args_table_utils.h
@@ -107,7 +107,7 @@
   //
   // To generate |proto_descriptor_array| please see
   // tools/gen_binary_descriptors and ensure the proto you are interested in is
-  // listed as a whitelisted proto. You can then find your variable inside the
+  // listed in the event_list file. You can then find your variable inside the
   // header location specified inside that python script.
   util::Status AddProtoFileDescriptor(const uint8_t* proto_descriptor_array,
                                       size_t proto_descriptor_array_size);
diff --git a/src/traced/probes/ftrace/compact_sched.cc b/src/traced/probes/ftrace/compact_sched.cc
index 35309d8..cd2d117 100644
--- a/src/traced/probes/ftrace/compact_sched.cc
+++ b/src/traced/probes/ftrace/compact_sched.cc
@@ -208,7 +208,7 @@
   return CompactSchedConfig{/*enabled=*/false};
 }
 
-// Sanity check size of stack-allocated bundle state.
+// Check size of stack-allocated bundle state.
 static_assert(sizeof(CompactSchedBuffer) <= 1 << 18,
               "CompactSchedBuffer's on-stack size excessively large.");
 
diff --git a/src/traced/probes/ftrace/event_info_unittest.cc b/src/traced/probes/ftrace/event_info_unittest.cc
index 6c3f9d8..5fbee6d 100644
--- a/src/traced/probes/ftrace/event_info_unittest.cc
+++ b/src/traced/probes/ftrace/event_info_unittest.cc
@@ -23,7 +23,7 @@
 namespace {
 using protozero::proto_utils::ProtoSchemaType;
 
-TEST(EventInfoTest, GetStaticEventInfoSanityCheck) {
+TEST(EventInfoTest, GetStaticEventInfoValidations) {
   std::vector<Event> events = GetStaticEventInfo();
   for (const Event& event : events) {
     // For each event the following fields should be filled
@@ -53,7 +53,7 @@
   }
 }
 
-TEST(EventInfoTest, GetStaticCommonFieldsInfoSanityCheck) {
+TEST(EventInfoTest, GetStaticCommonFieldsInfoValidations) {
   std::vector<Field> fields = GetStaticCommonFieldsInfo();
   for (const Field& field : fields) {
     // Non-empty name, group, and proto field id.
@@ -69,7 +69,7 @@
   }
 }
 
-TEST(EventInfoTest, SetTranslationStrategySanityCheck) {
+TEST(EventInfoTest, SetTranslationStrategyValidations) {
   TranslationStrategy strategy = kUint32ToUint32;
   ASSERT_FALSE(SetTranslationStrategy(kFtraceCString, ProtoSchemaType::kUint64,
                                       &strategy));
diff --git a/src/traced/probes/ftrace/proto_translation_table.cc b/src/traced/probes/ftrace/proto_translation_table.cc
index 44fe5f5..a84d2e2 100644
--- a/src/traced/probes/ftrace/proto_translation_table.cc
+++ b/src/traced/probes/ftrace/proto_translation_table.cc
@@ -414,7 +414,7 @@
     if (contents.empty() || !ParseFtraceEvent(contents, &ftrace_event)) {
       if (!strcmp(event.group, "ftrace") && !strcmp(event.name, "print")) {
         // On some "user" builds of Android <P the ftrace/print event is not
-        // selinux-whitelisted. Thankfully this event is an always-on built-in
+        // selinux-allowed. Thankfully this event is an always-on built-in
         // so we don't need to write to its 'enable' file. However we need to
         // know its binary layout to decode it, so we hardcode it.
         ftrace_event.id = 5;  // Seems quite stable across kernels.
diff --git a/src/traced/probes/ps/process_stats_data_source.cc b/src/traced/probes/ps/process_stats_data_source.cc
index 6adb342..cacbb48 100644
--- a/src/traced/probes/ps/process_stats_data_source.cc
+++ b/src/traced/probes/ps/process_stats_data_source.cc
@@ -399,9 +399,9 @@
 }
 
 void ProcessStatsDataSource::WriteAllProcessStats() {
-  // TODO(primiano): implement whitelisting of processes by names.
+  // TODO(primiano): implement filtering of processes by names.
   // TODO(primiano): Have a pid cache to avoid wasting cycles reading kthreads
-  // proc files over and over. Same for non-whitelist processes (see above).
+  // proc files over and over. Same for non-filtered processes (see above).
 
   CacheProcFsScanStartTimestamp();
   PERFETTO_METATRACE_SCOPED(TAG_PROC_POLLERS, PS_WRITE_ALL_PROCESS_STATS);
diff --git a/src/tracing/core/shared_memory_abi.cc b/src/tracing/core/shared_memory_abi.cc
index 75b9713..4b658f2 100644
--- a/src/tracing/core/shared_memory_abi.cc
+++ b/src/tracing/core/shared_memory_abi.cc
@@ -123,7 +123,7 @@
   static_assert((kAllChunksComplete & kChunkMask) == kChunkComplete,
                 "kAllChunksComplete out of sync with kChunkComplete");
 
-  // Sanity check the consistency of the kMax... constants.
+  // Check the consistency of the kMax... constants.
   static_assert(sizeof(ChunkHeader::writer_id) == sizeof(WriterID),
                 "WriterID size");
   ChunkHeader chunk_header{};
diff --git a/src/tracing/core/shared_memory_abi_unittest.cc b/src/tracing/core/shared_memory_abi_unittest.cc
index aed8893..e8d7125 100644
--- a/src/tracing/core/shared_memory_abi_unittest.cc
+++ b/src/tracing/core/shared_memory_abi_unittest.cc
@@ -107,7 +107,7 @@
       ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
                 abi.GetChunkState(page_idx, chunk_idx));
 
-      // Sanity check chunk bounds.
+      // Check chunk bounds.
       size_t expected_chunk_size =
           (page_size() - sizeof(SharedMemoryABI::PageHeader)) / num_chunks;
       expected_chunk_size = expected_chunk_size - (expected_chunk_size % 4);
diff --git a/src/tracing/core/trace_buffer.cc b/src/tracing/core/trace_buffer.cc
index 2227870..c7c5392 100644
--- a/src/tracing/core/trace_buffer.cc
+++ b/src/tracing/core/trace_buffer.cc
@@ -121,7 +121,7 @@
       base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
   if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
     stats_.set_abi_violations(stats_.abi_violations() + 1);
-    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
     return;
   }
 
@@ -172,7 +172,7 @@
                           prev->num_fragments > num_fragments ||
                           (prev->flags & chunk_flags) != prev->flags)) {
       stats_.set_abi_violations(stats_.abi_violations() + 1);
-      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
       return;
     }
 
@@ -189,14 +189,14 @@
     if (subsequent_it != index_.end() &&
         subsequent_it->second.num_fragments_read > 0) {
       stats_.set_abi_violations(stats_.abi_violations() + 1);
-      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
       return;
     }
 
     // If this chunk was previously copied with the same number of fragments and
     // the number didn't change, there's no need to copy it again. If the
     // previous chunk was complete already, this should always be the case.
-    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_ ||
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                     !record_meta->is_complete() ||
                     (chunk_complete && prev->num_fragments == num_fragments));
     if (prev->num_fragments == num_fragments) {
@@ -208,7 +208,7 @@
     if (record_meta->num_fragments_read > prev->num_fragments) {
       PERFETTO_ELOG(
           "TraceBuffer read too many fragments from an incomplete chunk");
-      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
       return;
     }
 
@@ -826,7 +826,7 @@
     // The producer has a bug or is malicious and did declare that the chunk
     // contains more packets beyond its boundaries.
     stats_.set_abi_violations(stats_.abi_violations() + 1);
-    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
     chunk_meta->cur_fragment_offset = 0;
     chunk_meta->num_fragments_read = chunk_meta->num_fragments;
     if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
@@ -856,7 +856,7 @@
     // R).
     if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
       stats_.set_abi_violations(stats_.abi_violations() + 1);
-      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
     } else {
       stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                           1);
@@ -884,7 +884,7 @@
     // We have at least one more packet to parse. It should be within the chunk.
     if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
         chunk_meta->chunk_record->size) {
-      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
     }
   }
 
diff --git a/src/tracing/core/trace_buffer.h b/src/tracing/core/trace_buffer.h
index a95c2f6..f50aac9 100644
--- a/src/tracing/core/trace_buffer.h
+++ b/src/tracing/core/trace_buffer.h
@@ -649,7 +649,7 @@
   // When true disable some DCHECKs that have been put in place to detect
   // bugs in the producers. This is for tests that feed malicious inputs and
   // hence mimic a buggy producer.
-  bool suppress_sanity_dchecks_for_testing_ = false;
+  bool suppress_client_dchecks_for_testing_ = false;
 };
 
 }  // namespace perfetto
diff --git a/src/tracing/core/trace_buffer_unittest.cc b/src/tracing/core/trace_buffer_unittest.cc
index f7012a5..a7bf27a 100644
--- a/src/tracing/core/trace_buffer_unittest.cc
+++ b/src/tracing/core/trace_buffer_unittest.cc
@@ -124,8 +124,8 @@
         trace_buffer_->index_.lower_bound(key));
   }
 
-  void SuppressSanityDchecksForTesting() {
-    trace_buffer_->suppress_sanity_dchecks_for_testing_ = true;
+  void SuppressClientDchecksForTesting() {
+    trace_buffer_->suppress_client_dchecks_for_testing_ = true;
   }
 
   std::vector<ChunkMetaKey> GetIndex() {
@@ -978,7 +978,7 @@
 
 TEST_F(TraceBufferTest, Malicious_ZeroSizedChunk) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(32, 'a')
       .CopyIntoTraceBuffer();
@@ -1002,7 +1002,7 @@
 // in a no-op.
 TEST_F(TraceBufferTest, Malicious_ChunkTooBig) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(4096, 'a')
       .AddPacket(2048, 'b')
@@ -1013,7 +1013,7 @@
 
 TEST_F(TraceBufferTest, Malicious_DeclareMorePacketsBeyondBoundaries) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(64, 'a')
       .IncrementNumPackets()
@@ -1034,7 +1034,7 @@
 
 TEST_F(TraceBufferTest, Malicious_ZeroVarintHeader) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   // Create a standalone chunk where the varint header is == 0.
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(4, 'a')
@@ -1054,7 +1054,7 @@
 // end of the buffer).
 TEST_F(TraceBufferTest, Malicious_OverflowingVarintHeader) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(4079, 'a')  // 4079 := 4096 - sizeof(ChunkRecord) - 1
       .AddPacket({0x82})  // 0x8*: that the varint continues on the next byte.
@@ -1067,7 +1067,7 @@
 
 TEST_F(TraceBufferTest, Malicious_VarintHeaderTooBig) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
 
   // Add a valid chunk.
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
@@ -1108,7 +1108,7 @@
 // contains an enormous varint number that tries to overflow.
 TEST_F(TraceBufferTest, Malicious_JumboVarint) {
   ResetBuffer(64 * 1024);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
 
   std::vector<uint8_t> chunk;
   chunk.insert(chunk.end(), 64 * 1024 - sizeof(ChunkRecord) * 2, 0xff);
@@ -1128,7 +1128,7 @@
 // skipped.
 TEST_F(TraceBufferTest, Malicious_ZeroVarintHeaderInSequence) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(4, 'a', kContOnNextChunk)
       .CopyIntoTraceBuffer();
@@ -1161,7 +1161,7 @@
 // zero-sized fragment should be skipped.
 TEST_F(TraceBufferTest, Malicious_ZeroVarintHeaderAtEndOfChunk) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(4, 'a')
       .AddPacket(4, 'b', kContOnNextChunk)
@@ -1206,7 +1206,7 @@
 
 TEST_F(TraceBufferTest, Malicious_OverrideWithShorterChunkSize) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(2048, 'a')
       .CopyIntoTraceBuffer();
@@ -1222,7 +1222,7 @@
 
 TEST_F(TraceBufferTest, Malicious_OverrideWithShorterChunkSizeAfterRead) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
 
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(30, 'a')
@@ -1256,7 +1256,7 @@
 
 TEST_F(TraceBufferTest, Malicious_OverrideWithDifferentOffsetAfterRead) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
 
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(30, 'a')
@@ -1433,7 +1433,7 @@
 
   // Overriding a complete packet here would trigger a DCHECK because the packet
   // was already marked as complete.
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(20, 'a')
       .AddPacket(30, 'b')
@@ -1450,7 +1450,7 @@
 // See also the Malicious_Override* tests above.
 TEST_F(TraceBufferTest, Override_ReCommitInvalid) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(20, 'a')
       .AddPacket(30, 'b')
@@ -1738,7 +1738,7 @@
 
 TEST_F(TraceBufferTest, MissingPacketsOnSequence) {
   ResetBuffer(4096);
-  SuppressSanityDchecksForTesting();
+  SuppressClientDchecksForTesting();
   CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
       .AddPacket(10, 'a')
       .AddPacket(10, 'b')
diff --git a/tools/ftrace_proto_gen/event_whitelist b/tools/ftrace_proto_gen/event_list
similarity index 100%
rename from tools/ftrace_proto_gen/event_whitelist
rename to tools/ftrace_proto_gen/event_list
diff --git a/tools/ftrace_proto_gen/ftrace_proto_gen.cc b/tools/ftrace_proto_gen/ftrace_proto_gen.cc
index 7ed8110..5320199 100644
--- a/tools/ftrace_proto_gen/ftrace_proto_gen.cc
+++ b/tools/ftrace_proto_gen/ftrace_proto_gen.cc
@@ -70,12 +70,12 @@
   return ToCamelCase(EventNameToProtoFieldName(group, name)) + "FtraceEvent";
 }
 
-std::vector<FtraceEventName> ReadWhitelist(const std::string& filename) {
+std::vector<FtraceEventName> ReadAllowList(const std::string& filename) {
   std::string line;
   std::vector<FtraceEventName> lines;
   std::ifstream fin(filename, std::ios::in);
   if (!fin) {
-    fprintf(stderr, "failed to open whitelist %s\n", filename.c_str());
+    fprintf(stderr, "failed to open event list %s\n", filename.c_str());
     return lines;
   }
   while (std::getline(fin, line)) {
@@ -116,7 +116,7 @@
   return true;
 }
 
-void GenerateFtraceEventProto(const std::vector<FtraceEventName>& raw_whitelist,
+void GenerateFtraceEventProto(const std::vector<FtraceEventName>& raw_eventlist,
                               const std::set<std::string>& groups,
                               std::ostream* fout) {
   *fout << kCopyrightHeader;
@@ -148,7 +148,7 @@
 )";
 
   int i = 3;
-  for (const FtraceEventName& event : raw_whitelist) {
+  for (const FtraceEventName& event : raw_eventlist) {
     if (!event.valid()) {
       *fout << "    // removed field with id " << i << ";\n";
       ++i;
@@ -221,7 +221,7 @@
   return s;
 }
 
-// This will generate the event_info.cc file for the whitelisted protos.
+// This will generate the event_info.cc file for the listed protos.
 void GenerateEventInfo(const std::vector<std::string>& events_info,
                        std::ostream* fout) {
   std::string s = kCopyrightHeader;
diff --git a/tools/ftrace_proto_gen/ftrace_proto_gen.h b/tools/ftrace_proto_gen/ftrace_proto_gen.h
index f9faed4..5592179 100644
--- a/tools/ftrace_proto_gen/ftrace_proto_gen.h
+++ b/tools/ftrace_proto_gen/ftrace_proto_gen.h
@@ -38,8 +38,8 @@
 std::string EventNameToProtoFieldName(const std::string& group,
                                       const std::string& name);
 
-std::vector<FtraceEventName> ReadWhitelist(const std::string& filename);
-void GenerateFtraceEventProto(const std::vector<FtraceEventName>& raw_whitelist,
+std::vector<FtraceEventName> ReadAllowList(const std::string& filename);
+void GenerateFtraceEventProto(const std::vector<FtraceEventName>& raw_eventlist,
                               const std::set<std::string>& groups,
                               std::ostream* fout);
 std::string SingleEventInfo(perfetto::Proto proto,
diff --git a/tools/ftrace_proto_gen/main.cc b/tools/ftrace_proto_gen/main.cc
index d051c4a..207ec2c 100644
--- a/tools/ftrace_proto_gen/main.cc
+++ b/tools/ftrace_proto_gen/main.cc
@@ -45,7 +45,7 @@
 
 void PrintUsage(const char* bin_name) {
   fprintf(stderr,
-          "Usage: %s -w whitelist_dir -o output_dir -d proto_descriptor "
+          "Usage: %s -w event_list_path -o output_dir -d proto_descriptor "
           "[--check_only] input_dir...\n",
           bin_name);
 }
@@ -53,7 +53,7 @@
 
 int main(int argc, char** argv) {
   static struct option long_options[] = {
-      {"whitelist_path", required_argument, nullptr, 'w'},
+      {"event_list", required_argument, nullptr, 'w'},
       {"output_dir", required_argument, nullptr, 'o'},
       {"proto_descriptor", required_argument, nullptr, 'd'},
       {"update_build_files", no_argument, nullptr, 'b'},
@@ -63,7 +63,7 @@
   int option_index;
   int c;
 
-  std::string whitelist_path;
+  std::string event_list_path;
   std::string output_dir;
   std::string proto_descriptor;
   bool update_build_files = false;
@@ -74,7 +74,7 @@
   while ((c = getopt_long(argc, argv, "", long_options, &option_index)) != -1) {
     switch (c) {
       case 'w':
-        whitelist_path = optarg;
+        event_list_path = optarg;
         break;
       case 'o':
         output_dir = optarg;
@@ -100,12 +100,12 @@
     return 1;
   }
 
-  PERFETTO_CHECK(!whitelist_path.empty());
+  PERFETTO_CHECK(!event_list_path.empty());
   PERFETTO_CHECK(!output_dir.empty());
   PERFETTO_CHECK(!proto_descriptor.empty());
 
-  std::vector<perfetto::FtraceEventName> whitelist =
-      perfetto::ReadWhitelist(whitelist_path);
+  std::vector<perfetto::FtraceEventName> event_list =
+      perfetto::ReadAllowList(event_list_path);
   std::vector<std::string> events_info;
 
   google::protobuf::DescriptorPool descriptor_pool;
@@ -127,7 +127,7 @@
   std::set<std::string> groups;
   std::multimap<std::string, const perfetto::FtraceEventName*> group_to_event;
   std::set<std::string> new_events;
-  for (const auto& event : whitelist) {
+  for (const auto& event : event_list) {
     if (!event.valid())
       continue;
     groups.emplace(event.group());
@@ -143,7 +143,7 @@
   {
     std::unique_ptr<std::ostream> out =
         ostream_factory(output_dir + "/ftrace_event.proto");
-    perfetto::GenerateFtraceEventProto(whitelist, groups, out.get());
+    perfetto::GenerateFtraceEventProto(event_list, groups, out.get());
   }
 
   for (const std::string& group : groups) {
@@ -200,7 +200,7 @@
       }
 
       uint32_t i = 0;
-      for (; it->second != &whitelist[i]; i++)
+      for (; it->second != &event_list[i]; i++)
         ;
 
       // The first id used for events in FtraceEvent proto is 3.
diff --git a/tools/gen_amalgamated b/tools/gen_amalgamated
index a527b12..63ad4dd 100755
--- a/tools/gen_amalgamated
+++ b/tools/gen_amalgamated
@@ -61,16 +61,16 @@
 recurse_in_header_deps = '^//protos/.*(cpp|zero)$'
 
 # Compiler flags which aren't filtered out.
-cflag_whitelist = r'^-(W.*|fno-exceptions|fPIC|std.*|fvisibility.*)$'
+cflag_allowlist = r'^-(W.*|fno-exceptions|fPIC|std.*|fvisibility.*)$'
 
 # Linker flags which aren't filtered out.
-ldflag_whitelist = r'^-()$'
+ldflag_allowlist = r'^-()$'
 
 # Libraries which are filtered out.
-lib_blacklist = r'^(c|gcc_eh)$'
+lib_denylist = r'^(c|gcc_eh)$'
 
 # Macros which aren't filtered out.
-define_whitelist = r'^(PERFETTO.*|GOOGLE_PROTOBUF.*)$'
+define_allowlist = r'^(PERFETTO.*|GOOGLE_PROTOBUF.*)$'
 
 # Includes which will be removed from the generated source.
 includes_to_remove = r'^(gtest).*$'
@@ -138,12 +138,12 @@
 """ % tool_name
 
 
-def apply_blacklist(blacklist, items):
-  return [item for item in items if not re.match(blacklist, item)]
+def apply_denylist(denylist, items):
+  return [item for item in items if not re.match(denylist, item)]
 
 
-def apply_whitelist(whitelist, items):
-  return [item for item in items if re.match(whitelist, item)]
+def apply_allowlist(allowlist, items):
+  return [item for item in items if re.match(allowlist, item)]
 
 
 def normalize_path(path):
@@ -297,7 +297,7 @@
         result.append(flag)
       else:
         result[-1] += flag
-    return apply_whitelist(cflag_whitelist, result)
+    return apply_allowlist(cflag_allowlist, result)
 
   def _add_target_flags(self, target_name):
     for target_name in self._iterate_target_and_deps(target_name):
@@ -305,10 +305,10 @@
       self.cflags.update(self._filter_cflags(target.get('cflags', [])))
       self.cflags.update(self._filter_cflags(target.get('cflags_cc', [])))
       self.ldflags.update(
-          apply_whitelist(ldflag_whitelist, target.get('ldflags', [])))
-      self.libs.update(apply_blacklist(lib_blacklist, target.get('libs', [])))
+          apply_allowlist(ldflag_allowlist, target.get('ldflags', [])))
+      self.libs.update(apply_denylist(lib_denylist, target.get('libs', [])))
       self.defines.update(
-          apply_whitelist(define_whitelist, target.get('defines', [])))
+          apply_allowlist(define_allowlist, target.get('defines', [])))
 
   def _add_target_headers(self, target_name):
     target = self.desc[target_name]
diff --git a/tools/gen_android_bp b/tools/gen_android_bp
index 5e61182..c26b284 100755
--- a/tools/gen_android_bp
+++ b/tools/gen_android_bp
@@ -99,7 +99,7 @@
 module_prefix = 'perfetto_'
 
 # Shared libraries which are directly translated to Android system equivalents.
-shared_library_whitelist = [
+shared_library_allowlist = [
     'android',
     'android.hardware.atrace@1.0',
     'android.hardware.health@2.0',
@@ -118,7 +118,7 @@
 ]
 
 # Static libraries which are directly translated to Android system equivalents.
-static_library_whitelist = [
+static_library_allowlist = [
     'statslog_perfetto',
 ]
 
@@ -130,10 +130,10 @@
 tree_path = 'external/perfetto'
 
 # Compiler flags which are passed through to the blueprint.
-cflag_whitelist = r'^-DPERFETTO.*$'
+cflag_allowlist = r'^-DPERFETTO.*$'
 
 # Compiler defines which are passed through to the blueprint.
-define_whitelist = r'^(GOOGLE_PROTO.*)|(ZLIB_.*)|(USE_MMAP)|(HAVE_HIDDEN)$'
+define_allowlist = r'^(GOOGLE_PROTO.*)|(ZLIB_.*)|(USE_MMAP)|(HAVE_HIDDEN)$'
 
 # Shared libraries which are not in PDK.
 library_not_in_pdk = {
@@ -630,10 +630,10 @@
 
 
 def _get_cflags(target):
-  cflags = {flag for flag in target.cflags if re.match(cflag_whitelist, flag)}
+  cflags = {flag for flag in target.cflags if re.match(cflag_allowlist, flag)}
   cflags |= set("-D%s" % define
                 for define in target.defines
-                if re.match(define_whitelist, define))
+                if re.match(define_allowlist, define))
   return cflags
 
 
@@ -701,9 +701,9 @@
       # Generally library names should be mangled as 'libXXX', unless they
       # are HAL libraries (e.g., android.hardware.health@2.0).
       android_lib = lib if '@' in lib else 'lib' + lib
-      if lib in shared_library_whitelist:
+      if lib in shared_library_allowlist:
         module.shared_libs.add(android_lib)
-      if lib in static_library_whitelist:
+      if lib in static_library_allowlist:
         module.static_libs.add(android_lib)
 
   # If the module is a static library, export all the generated headers.
diff --git a/tools/run_ftrace_proto_gen b/tools/run_ftrace_proto_gen
index c8e9ca7..d1770f5 100755
--- a/tools/run_ftrace_proto_gen
+++ b/tools/run_ftrace_proto_gen
@@ -2,7 +2,7 @@
 
 # This script generates .proto files for ftrace events from the /format files
 # in src/traced/probes/ftrace/test/data/*/events/.
-# Only the events in the whitelist are translated.
+# Only the events in the event_list are translated.
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 if [ "$BUILDDIR" == "" ]; then
@@ -23,7 +23,7 @@
 cd "$DIR/.."
 
 "$BUILDDIR/ftrace_proto_gen" \
-  --whitelist_path "$DIR/ftrace_proto_gen/event_whitelist" \
+  --event_list "$DIR/ftrace_proto_gen/event_list" \
   --output_dir "$DIR/../protos/perfetto/trace/ftrace/" \
   --proto_descriptor "$BUILDDIR/$DESCRIPTOR" \
   --update_build_files \
diff --git a/ui/src/tracks/cpu_freq/frontend.ts b/ui/src/tracks/cpu_freq/frontend.ts
index e642e3f..367049a 100644
--- a/ui/src/tracks/cpu_freq/frontend.ts
+++ b/ui/src/tracks/cpu_freq/frontend.ts
@@ -66,7 +66,7 @@
     assertTrue(data.timestamps.length === data.maxFreqKHz.length);
     assertTrue(data.timestamps.length === data.lastIdleValues.length);
 
-    const endPx = Math.floor(timeScale.timeToPx(visibleWindowTime.end));
+    const endPx = timeScale.timeToPx(visibleWindowTime.end);
     const zeroY = MARGIN_TOP + RECT_HEIGHT;
 
     // Quantize the Y axis to quarters of powers of tens (7.5K, 10K, 12.5K).
@@ -96,11 +96,18 @@
       return zeroY - Math.round((value / yMax) * RECT_HEIGHT);
     };
 
+    const [rawStartIdx,] =
+      searchSegment(data.timestamps, visibleWindowTime.start);
+    const [, rawEndIdx] = searchSegment(data.timestamps, visibleWindowTime.end);
+
+    const startIdx = rawStartIdx === -1 ? 0 : rawStartIdx;
+    const endIdx = rawEndIdx === -1 ? data.timestamps.length : rawEndIdx;
+
     ctx.beginPath();
-    ctx.moveTo(calculateX(data.timestamps[0]), zeroY);
+    ctx.moveTo(Math.max(calculateX(data.timestamps[startIdx]), 0), zeroY);
 
     let lastDrawnY = zeroY;
-    for (let i = 0; i < data.timestamps.length; i++) {
+    for (let i = startIdx; i < endIdx; i++) {
       const x = calculateX(data.timestamps[i]);
 
       const minY = calculateY(data.minFreqKHz[i]);
@@ -120,7 +127,7 @@
     }
     // Find the end time for the last frequency event and then draw
     // down to zero to show that we do not have data after that point.
-    const finalX = calculateX(data.maxTsEnd);
+    const finalX = Math.min(calculateX(data.maxTsEnd), endPx);
     ctx.lineTo(finalX, lastDrawnY);
     ctx.lineTo(finalX, zeroY);
     ctx.lineTo(endPx, zeroY);
diff --git a/ui/src/tracks/cpu_slices/frontend.ts b/ui/src/tracks/cpu_slices/frontend.ts
index a51c572..dca1faf 100644
--- a/ui/src/tracks/cpu_slices/frontend.ts
+++ b/ui/src/tracks/cpu_slices/frontend.ts
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import {search, searchEq} from '../../base/binary_search';
+import {search, searchEq, searchSegment} from '../../base/binary_search';
 import {assertTrue} from '../../base/logging';
 import {Actions} from '../../common/actions';
 import {cropText, drawDoubleHeadedArrow} from '../../common/canvas_utils';
@@ -80,36 +80,23 @@
     ctx.font = '12px Roboto Condensed';
     const charWidth = ctx.measureText('dbpqaouk').width / 8;
 
-    for (let i = 0; i < data.starts.length; i++) {
+    const [rawStartIdx, ] = searchSegment(data.ends, visibleWindowTime.start);
+    const [, rawEndIdx] = searchSegment(data.starts, visibleWindowTime.end);
+
+    const startIdx = rawStartIdx === -1 ? 0 : rawStartIdx;
+    const endIdx = rawEndIdx === -1 ? data.starts.length : rawEndIdx;
+
+    for (let i = startIdx; i < endIdx; i++) {
       const tStart = data.starts[i];
       const tEnd = data.ends[i];
       const utid = data.utids[i];
-      if (tEnd <= visibleWindowTime.start || tStart >= visibleWindowTime.end) {
-        continue;
-      }
+
       const rectStart = timeScale.timeToPx(tStart);
       const rectEnd = timeScale.timeToPx(tEnd);
       const rectWidth = Math.max(1, rectEnd - rectStart);
-      const threadInfo = globals.threads.get(utid);
 
-      // TODO: consider de-duplicating this code with the copied one from
-      // chrome_slices/frontend.ts.
-      let title = `[utid:${utid}]`;
-      let subTitle = '';
-      let pid = -1;
-      if (threadInfo) {
-        if (threadInfo.pid) {
-          pid = threadInfo.pid;
-          let procName = threadInfo.procName || '';
-          if (procName.startsWith('/')) {  // Remove folder paths from name
-            procName = procName.substring(procName.lastIndexOf('/') + 1);
-          }
-          title = `${procName} [${threadInfo.pid}]`;
-          subTitle = `${threadInfo.threadName} [${threadInfo.tid}]`;
-        } else {
-          title = `${threadInfo.threadName} [${threadInfo.tid}]`;
-        }
-      }
+      const threadInfo = globals.threads.get(utid);
+      const pid = threadInfo && threadInfo.pid ? threadInfo.pid : -1;
 
       const isHovering = globals.frontendLocalState.hoveredUtid !== -1;
       const isThreadHovered = globals.frontendLocalState.hoveredUtid === utid;
@@ -133,6 +120,22 @@
       // Don't render text when we have less than 5px to play with.
       if (rectWidth < 5) continue;
 
+      // TODO: consider de-duplicating this code with the copied one from
+      // chrome_slices/frontend.ts.
+      let title = `[utid:${utid}]`;
+      let subTitle = '';
+      if (threadInfo) {
+        if (threadInfo.pid) {
+          let procName = threadInfo.procName || '';
+          if (procName.startsWith('/')) {  // Remove folder paths from name
+            procName = procName.substring(procName.lastIndexOf('/') + 1);
+          }
+          title = `${procName} [${threadInfo.pid}]`;
+          subTitle = `${threadInfo.threadName} [${threadInfo.tid}]`;
+        } else {
+          title = `${threadInfo.threadName} [${threadInfo.tid}]`;
+        }
+      }
       title = cropText(title, charWidth, rectWidth);
       subTitle = cropText(subTitle, charWidth, rectWidth);
       const rectXCenter = rectStart + rectWidth / 2;
diff --git a/ui/src/tracks/process_scheduling/frontend.ts b/ui/src/tracks/process_scheduling/frontend.ts
index d7a5e09..0bef5a6 100644
--- a/ui/src/tracks/process_scheduling/frontend.ts
+++ b/ui/src/tracks/process_scheduling/frontend.ts
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import {searchEq, searchRange} from '../../base/binary_search';
+import {searchEq, searchRange, searchSegment} from '../../base/binary_search';
 import {assertTrue} from '../../base/logging';
 import {TrackState} from '../../common/state';
 import {checkerboardExcept} from '../../frontend/checkerboard';
@@ -68,16 +68,20 @@
     assertTrue(data.starts.length === data.ends.length);
     assertTrue(data.starts.length === data.utids.length);
 
+    const [rawStartIdx, ] = searchSegment(data.ends, visibleWindowTime.start);
+    const [, rawEndIdx] = searchSegment(data.starts, visibleWindowTime.end);
+
+    const startIdx = rawStartIdx === -1 ? 0 : rawStartIdx;
+    const endIdx = rawEndIdx === -1 ? data.starts.length : rawEndIdx;
+
     const cpuTrackHeight = Math.floor(RECT_HEIGHT / data.maxCpu);
 
-    for (let i = 0; i < data.starts.length; i++) {
+    for (let i = startIdx; i < endIdx; i++) {
       const tStart = data.starts[i];
       const tEnd = data.ends[i];
       const utid = data.utids[i];
       const cpu = data.cpus[i];
-      if (tEnd <= visibleWindowTime.start || tStart >= visibleWindowTime.end) {
-        continue;
-      }
+
       const rectStart = timeScale.timeToPx(tStart);
       const rectEnd = timeScale.timeToPx(tEnd);
       const rectWidth = rectEnd - rectStart;