Merge "amalgamator: normalize paths, make output stable"
diff --git a/Android.bp b/Android.bp
index d721882..dc3fb3c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -5938,6 +5938,7 @@
     "src/trace_processor/sched_slice_table.cc",
     "src/trace_processor/span_join_operator_table.cc",
     "src/trace_processor/sql_stats_table.cc",
+    "src/trace_processor/sqlite_experimental_flamegraph_table.cc",
     "src/trace_processor/sqlite_raw_table.cc",
     "src/trace_processor/stats_table.cc",
     "src/trace_processor/storage_columns.cc",
@@ -6096,6 +6097,7 @@
     "src/trace_processor/heap_profile_tracker_unittest.cc",
     "src/trace_processor/importers/fuchsia/fuchsia_trace_utils_unittest.cc",
     "src/trace_processor/importers/proto/args_table_utils_unittest.cc",
+    "src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc",
     "src/trace_processor/importers/proto/heap_graph_walker_unittest.cc",
     "src/trace_processor/importers/proto/proto_trace_parser_unittest.cc",
     "src/trace_processor/importers/systrace/systrace_parser_unittest.cc",
diff --git a/BUILD b/BUILD
index b62b186..0cc3c33 100644
--- a/BUILD
+++ b/BUILD
@@ -481,6 +481,7 @@
         "include/perfetto/tracing/track_event.h",
         "include/perfetto/tracing/track_event_category_registry.h",
         "include/perfetto/tracing/track_event_interned_data_index.h",
+        "include/perfetto/tracing/track_event_legacy.h",
     ],
 )
 
@@ -808,6 +809,8 @@
         "src/trace_processor/span_join_operator_table.h",
         "src/trace_processor/sql_stats_table.cc",
         "src/trace_processor/sql_stats_table.h",
+        "src/trace_processor/sqlite_experimental_flamegraph_table.cc",
+        "src/trace_processor/sqlite_experimental_flamegraph_table.h",
         "src/trace_processor/sqlite_raw_table.cc",
         "src/trace_processor/sqlite_raw_table.h",
         "src/trace_processor/stats_table.cc",
@@ -1492,9 +1495,7 @@
     srcs = [
         "protos/perfetto/config/perfetto_config.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
 )
 
 # GN target: //protos/perfetto/config/power:cpp
@@ -1613,9 +1614,7 @@
         "protos/perfetto/config/test_config.proto",
         "protos/perfetto/config/trace_config.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
     deps = [
         ":protos_perfetto_common_protos",
         ":protos_perfetto_config_android_protos",
@@ -1786,9 +1785,7 @@
         "protos/perfetto/metrics/android/unmapped_java_symbols.proto",
         "protos/perfetto/metrics/android/unsymbolized_frames.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
 )
 
 # GN target: //protos/perfetto/metrics/android:zero
@@ -1813,9 +1810,7 @@
     srcs = [
         "protos/perfetto/metrics/metrics.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
     deps = [
         ":protos_perfetto_metrics_android_protos",
     ],
@@ -2056,9 +2051,7 @@
     srcs = [
         "protos/perfetto/trace/perfetto_trace.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
 )
 
 # GN target: //protos/perfetto/trace:minimal_lite
@@ -2119,9 +2112,7 @@
         "protos/perfetto/trace/trace_packet.proto",
         "protos/perfetto/trace/trace_packet_defaults.proto",
     ],
-    visibility = [
-        "//visibility:public",
-    ],
+    visibility = PERFETTO_CONFIG.public_visibility,
     deps = [
         ":protos_perfetto_common_protos",
         ":protos_perfetto_config_android_protos",
diff --git a/BUILD.gn b/BUILD.gn
index ea24c77..c603a03 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -39,7 +39,7 @@
   ]
 }
 
-if (enable_perfetto_trace_processor) {
+if (enable_perfetto_trace_processor && enable_perfetto_trace_processor_sqlite) {
   all_targets += [ "src/trace_processor:trace_processor_shell" ]
 }
 
@@ -228,10 +228,6 @@
       "src/trace_processor:export_json",
       "src/trace_processor:storage_minimal",
       "src/tracing",
-      "src/tracing:client_api",
-
-      # TODO(eseckler): Create a platform for chrome and hook it up somehow.
-      "src/tracing:platform_fake",
     ]
     configs -= [ "//build/config/compiler:chromium_code" ]
     configs += [ "//build/config/compiler:no_chromium_code" ]
@@ -239,7 +235,6 @@
       "include/perfetto/ext/trace_processor:export_json",
       "include/perfetto/ext/tracing/core",
       "include/perfetto/trace_processor:storage",
-      "include/perfetto/tracing",
       "protos/perfetto/common:zero",
       "protos/perfetto/trace:zero",
       "protos/perfetto/trace/chrome:zero",
diff --git a/bazel/rules.bzl b/bazel/rules.bzl
index 7fca94d..498f34a 100644
--- a/bazel/rules.bzl
+++ b/bazel/rules.bzl
@@ -22,7 +22,9 @@
 def default_cc_args():
     return {
         "deps": PERFETTO_CONFIG.deps.build_config,
-        "copts": [],
+        "copts": [
+            "-Wno-pragma-system-header-outside-header",
+        ],
         "includes": ["include"],
         "linkopts": select({
             "@perfetto//bazel:os_linux": ["-ldl", "-lrt", "-lpthread"],
diff --git a/bazel/standalone/perfetto_cfg.bzl b/bazel/standalone/perfetto_cfg.bzl
index 1e8f6d0..d441c06 100644
--- a/bazel/standalone/perfetto_cfg.bzl
+++ b/bazel/standalone/perfetto_cfg.bzl
@@ -55,6 +55,13 @@
         sqlite = [],
     ),
 
+    # Allow Bazel embedders to change the visibility of "public" targets.
+    # This variable has been introduced to limit the change to Bazel and avoid
+    # making the targets fully public in the google internal tree.
+    public_visibility = [
+        "//visibility:public",
+    ],
+
     # Allow Bazel embedders to change the visibility of the proto targets.
     # This variable has been introduced to limit the change to Bazel and avoid
     # making the targets public in the google internal tree.
diff --git a/gn/BUILD.gn b/gn/BUILD.gn
index 3440b62..7412aa3 100644
--- a/gn/BUILD.gn
+++ b/gn/BUILD.gn
@@ -254,16 +254,18 @@
   }
 }
 
-group("sqlite") {
-  if (perfetto_root_path == "//") {
-    public_deps = [
-      "//buildtools:sqlite",
-    ]
-  } else {
-    public_deps = [
-      "//third_party/sqlite:sqlite",
-    ]
-    public_configs = [ ":sqlite_third_party_include_path" ]
+if (enable_perfetto_trace_processor_sqlite) {
+  group("sqlite") {
+    if (perfetto_root_path == "//") {
+      public_deps = [
+        "//buildtools:sqlite",
+      ]
+    } else {
+      public_deps = [
+        "//third_party/sqlite:sqlite",
+      ]
+      public_configs = [ ":sqlite_third_party_include_path" ]
+    }
   }
 }
 
diff --git a/gn/perfetto.gni b/gn/perfetto.gni
index 563db03..57c27aa 100644
--- a/gn/perfetto.gni
+++ b/gn/perfetto.gni
@@ -200,6 +200,11 @@
   perfetto_verbose_logs_enabled =
       !build_with_chromium || perfetto_force_dlog == "on"
 
+  # Enables the SQL query layer of trace processor.
+  enable_perfetto_trace_processor_sqlite =
+      enable_perfetto_trace_processor &&
+      (build_with_chromium || !perfetto_build_with_embedder)
+
   # Enables the optional SQLite percentile module.
   enable_perfetto_trace_processor_percentile =
       enable_perfetto_trace_processor && perfetto_build_standalone
@@ -237,10 +242,12 @@
       !(build_with_chromium && is_android)
 
   # Enables the trace_to_text tool.
-  enable_perfetto_tools_trace_to_text = enable_perfetto_tools
+  enable_perfetto_tools_trace_to_text =
+      enable_perfetto_tools && enable_perfetto_trace_processor_sqlite
 
   # Allows to build the UI (TypeScript/ HTML / WASM)
-  enable_perfetto_ui = perfetto_build_standalone
+  enable_perfetto_ui =
+      perfetto_build_standalone && enable_perfetto_trace_processor_sqlite
 }
 
 # +---------------------------------------------------------------------------+
diff --git a/gn/perfetto_unittests.gni b/gn/perfetto_unittests.gni
index ead0c8b..11ba49f 100644
--- a/gn/perfetto_unittests.gni
+++ b/gn/perfetto_unittests.gni
@@ -61,8 +61,9 @@
 }
 
 if (enable_perfetto_trace_processor) {
-  perfetto_unittests_targets += [
-    "src/trace_processor:unittests",
-    "src/trace_processor/metrics:unittests",
-  ]
+  perfetto_unittests_targets += [ "src/trace_processor:unittests" ]
+
+  if (enable_perfetto_trace_processor_sqlite) {
+    perfetto_unittests_targets += [ "src/trace_processor/metrics:unittests" ]
+  }
 }
diff --git a/include/perfetto/tracing.h b/include/perfetto/tracing.h
index be9ef6d..fca7c70 100644
--- a/include/perfetto/tracing.h
+++ b/include/perfetto/tracing.h
@@ -33,5 +33,6 @@
 #include "perfetto/tracing/tracing_backend.h"
 #include "perfetto/tracing/track_event.h"
 #include "perfetto/tracing/track_event_interned_data_index.h"
+#include "perfetto/tracing/track_event_legacy.h"
 
 #endif  // INCLUDE_PERFETTO_TRACING_H_
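
(A minimal usage sketch, not part of this change: assuming a project that already sets up the Perfetto client library, the legacy shim introduced below is opted into by defining PERFETTO_ENABLE_LEGACY_TRACE_EVENTS before including perfetto/tracing.h; without that define the legacy macros are not compiled in.)

    // Hypothetical embedder code, shown only to illustrate how the new
    // track_event_legacy.h shim is enabled. In this change the legacy macros
    // still route to a no-op trace point.
    #define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
    #include "perfetto/tracing.h"

    void DrawFrame() {
      TRACE_EVENT0("rendering", "DrawFrame");         // scoped legacy event
      TRACE_COUNTER1("rendering", "frame_count", 1);  // legacy counter
    }
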
diff --git a/include/perfetto/tracing/BUILD.gn b/include/perfetto/tracing/BUILD.gn
index 60f4d4e..5c7a79f 100644
--- a/include/perfetto/tracing/BUILD.gn
+++ b/include/perfetto/tracing/BUILD.gn
@@ -45,5 +45,6 @@
     "track_event.h",
     "track_event_category_registry.h",
     "track_event_interned_data_index.h",
+    "track_event_legacy.h",
   ]
 }
diff --git a/include/perfetto/tracing/event_context.h b/include/perfetto/tracing/event_context.h
index da1607b..fe9c4bb 100644
--- a/include/perfetto/tracing/event_context.h
+++ b/include/perfetto/tracing/event_context.h
@@ -35,9 +35,15 @@
 //                       dbg->set_int_value(1234);
 //                     });
 //
-class EventContext {
+class PERFETTO_EXPORT EventContext {
  public:
   EventContext(EventContext&&) = default;
+
+  // For Chromium during the transition phase to the client library.
+  // TODO(eseckler): Remove once Chromium has switched to client lib entirely.
+  explicit EventContext(protos::pbzero::TrackEvent* event)
+      : event_(event), incremental_state_(nullptr) {}
+
   ~EventContext();
 
   protos::pbzero::TrackEvent* event() const { return event_; }
diff --git a/include/perfetto/tracing/track_event_legacy.h b/include/perfetto/tracing/track_event_legacy.h
new file mode 100644
index 0000000..8513024
--- /dev/null
+++ b/include/perfetto/tracing/track_event_legacy.h
@@ -0,0 +1,846 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
+
+// This file defines a compatibility shim between legacy (Chrome, V8) trace
+// event macros and track events. To avoid accidentally introducing legacy
+// events in new code, the PERFETTO_ENABLE_LEGACY_TRACE_EVENTS macro must be set
+// to 1 to activate the compatibility layer.
+
+#include "perfetto/base/compiler.h"
+
+#include <stdint.h>
+
+#ifndef PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 0
+#endif
+
+#if PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+
+// Ignore GCC warning about a missing argument for a variadic macro parameter.
+#pragma GCC system_header
+
+// ----------------------------------------------------------------------------
+// Constants.
+// ----------------------------------------------------------------------------
+
+// The following constants are defined in the global namespace, since they were
+// originally implemented as macros.
+
+// Event phases.
+static constexpr char TRACE_EVENT_PHASE_BEGIN = 'B';
+static constexpr char TRACE_EVENT_PHASE_END = 'E';
+static constexpr char TRACE_EVENT_PHASE_COMPLETE = 'X';
+static constexpr char TRACE_EVENT_PHASE_INSTANT = 'I';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_BEGIN = 'S';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_STEP_INTO = 'T';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_STEP_PAST = 'p';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_END = 'F';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN = 'b';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_END = 'e';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT = 'n';
+static constexpr char TRACE_EVENT_PHASE_FLOW_BEGIN = 's';
+static constexpr char TRACE_EVENT_PHASE_FLOW_STEP = 't';
+static constexpr char TRACE_EVENT_PHASE_FLOW_END = 'f';
+static constexpr char TRACE_EVENT_PHASE_METADATA = 'M';
+static constexpr char TRACE_EVENT_PHASE_COUNTER = 'C';
+static constexpr char TRACE_EVENT_PHASE_SAMPLE = 'P';
+static constexpr char TRACE_EVENT_PHASE_CREATE_OBJECT = 'N';
+static constexpr char TRACE_EVENT_PHASE_SNAPSHOT_OBJECT = 'O';
+static constexpr char TRACE_EVENT_PHASE_DELETE_OBJECT = 'D';
+static constexpr char TRACE_EVENT_PHASE_MEMORY_DUMP = 'v';
+static constexpr char TRACE_EVENT_PHASE_MARK = 'R';
+static constexpr char TRACE_EVENT_PHASE_CLOCK_SYNC = 'c';
+static constexpr char TRACE_EVENT_PHASE_ENTER_CONTEXT = '(';
+static constexpr char TRACE_EVENT_PHASE_LEAVE_CONTEXT = ')';
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+static constexpr uint32_t TRACE_EVENT_FLAG_NONE = 0;
+static constexpr uint32_t TRACE_EVENT_FLAG_COPY = 1u << 0;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_ID = 1u << 1;
+// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
+static constexpr uint32_t TRACE_EVENT_FLAG_MANGLE_ID = 1u << 2;
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_OFFSET = 1u << 3;
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_EXTRA = 1u << 4;
+static constexpr uint32_t TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP = 1u << 5;
+static constexpr uint32_t TRACE_EVENT_FLAG_ASYNC_TTS = 1u << 6;
+static constexpr uint32_t TRACE_EVENT_FLAG_BIND_TO_ENCLOSING = 1u << 7;
+static constexpr uint32_t TRACE_EVENT_FLAG_FLOW_IN = 1u << 8;
+static constexpr uint32_t TRACE_EVENT_FLAG_FLOW_OUT = 1u << 9;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_CONTEXT_ID = 1u << 10;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_PROCESS_ID = 1u << 11;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_LOCAL_ID = 1u << 12;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_GLOBAL_ID = 1u << 13;
+// TODO(eseckler): Remove once we have native support for typed proto events in
+// TRACE_EVENT macros.
+static constexpr uint32_t TRACE_EVENT_FLAG_TYPED_PROTO_ARGS = 1u << 15;
+static constexpr uint32_t TRACE_EVENT_FLAG_JAVA_STRING_LITERALS = 1u << 16;
+
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_MASK =
+    TRACE_EVENT_FLAG_SCOPE_OFFSET | TRACE_EVENT_FLAG_SCOPE_EXTRA;
+
+// Type values for identifying types in the TraceValue union.
+static constexpr uint8_t TRACE_VALUE_TYPE_BOOL = 1;
+static constexpr uint8_t TRACE_VALUE_TYPE_UINT = 2;
+static constexpr uint8_t TRACE_VALUE_TYPE_INT = 3;
+static constexpr uint8_t TRACE_VALUE_TYPE_DOUBLE = 4;
+static constexpr uint8_t TRACE_VALUE_TYPE_POINTER = 5;
+static constexpr uint8_t TRACE_VALUE_TYPE_STRING = 6;
+static constexpr uint8_t TRACE_VALUE_TYPE_COPY_STRING = 7;
+static constexpr uint8_t TRACE_VALUE_TYPE_CONVERTABLE = 8;
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+static constexpr uint8_t TRACE_EVENT_SCOPE_GLOBAL = 0u << 3;
+static constexpr uint8_t TRACE_EVENT_SCOPE_PROCESS = 1u << 3;
+static constexpr uint8_t TRACE_EVENT_SCOPE_THREAD = 2u << 3;
+
+static constexpr char TRACE_EVENT_SCOPE_NAME_GLOBAL = 'g';
+static constexpr char TRACE_EVENT_SCOPE_NAME_PROCESS = 'p';
+static constexpr char TRACE_EVENT_SCOPE_NAME_THREAD = 't';
+
+// ----------------------------------------------------------------------------
+// Internal legacy trace point implementation.
+// ----------------------------------------------------------------------------
+
+// A black hole trace point where unsupported trace events are routed.
+#define PERFETTO_INTERNAL_EVENT_NOOP(cat, name, ...) \
+  do {                                               \
+    if (false) {                                     \
+      ::perfetto::base::ignore_result(cat);          \
+      ::perfetto::base::ignore_result(name);         \
+    }                                                \
+  } while (false)
+
+// Implementations for the INTERNAL_* adapter macros used by the trace points
+// below.
+#define INTERNAL_TRACE_EVENT_ADD(...) PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(...) \
+  PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+
+#define INTERNAL_TRACE_TIME_TICKS_NOW() 0
+#define INTERNAL_TRACE_TIME_NOW() 0
+
+// ----------------------------------------------------------------------------
+// Legacy tracing common API (adapted from trace_event_common.h).
+// ----------------------------------------------------------------------------
+
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Scoped events.
+#define TRACE_EVENT0(category_group, name) \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags)  \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags,  \
+                               arg1_name, arg1_val)                        \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name,   \
+                     arg2_val)                                               \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+                                  arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags,    \
+                               arg1_name, arg1_val, arg2_name, arg2_val)     \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id,   \
+                                            flow_flags, arg1_name, arg1_val, \
+                                            arg2_name, arg2_val)
+
+// Instant events.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+                             arg2_name, arg2_val)                              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope)              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name,   \
+                                  arg1_val)                                 \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name,      \
+                                  arg1_val, arg2_name, arg2_val)               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS0(category_group, name, scope_and_flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS1(category_group, name, scope_and_flags, \
+                                        arg1_name, arg1_val)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags, arg1_name, arg1_val)
+
+// Instant events with explicit timestamps.
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope,   \
+                                            timestamp)                     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_INSTANT,       \
+                                          category_group, name, timestamp, \
+                                          TRACE_EVENT_FLAG_NONE | scope)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP1(category_group, name, scope,  \
+                                            timestamp, arg_name, arg_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,         \
+      TRACE_EVENT_FLAG_NONE | scope, arg_name, arg_val)
+
+// Begin events.
+#define TRACE_EVENT_BEGIN0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,    \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, flags)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS1(category_group, name, flags, arg1_name, \
+                                      arg1_val)                               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,     \
+                           flags, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+                                arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)
+
+// Begin events with explicit timestamps.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                     thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val,  \
+    arg2_name, arg2_val)                                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,   \
+      arg2_val)
+
+// End events.
+#define TRACE_EVENT_END0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+                         arg2_val)                                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,        \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,         \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_END_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags)
+#define TRACE_EVENT_END_WITH_FLAGS1(category_group, name, flags, arg1_name,    \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags, \
+                           arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+                              arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,   \
+                           arg2_name, arg2_val)
+
+// Mark events.
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK,          \
+                                          category_group, name, timestamp, \
+                                          TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+                                         arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP2(                                      \
+    category_group, name, timestamp, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                     \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,                 \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name)                      \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK,             \
+                                          category_group, name, timestamp,    \
+                                          TRACE_EVENT_FLAG_COPY)
+
+// End events with explicit thread and timestamp.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                   thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+    arg2_name, arg2_val)                                                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,  \
+      arg2_val)
+
+// Counters.
+#define TRACE_COUNTER1(category_group, name, value)                         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, "value",                  \
+                           static_cast<int>(value))
+#define TRACE_COUNTER_WITH_FLAG1(category_group, name, flag, value)         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           flag, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value)                    \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, "value",                  \
+                           static_cast<int>(value))
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val,       \
+                       value2_name, value2_val)                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val,  \
+                            value2_name, value2_val)                        \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+
+// Counters with explicit timestamps.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,             \
+      TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp,      \
+                                      value1_name, value1_val, value2_name, \
+                                      value2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                  \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,           \
+      TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val),     \
+      value2_name, static_cast<int>(value2_val))
+
+// Counters with ids.
+#define TRACE_COUNTER_ID1(category_group, name, id, value)                    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, "value",  \
+                                   static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value)               \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, "value",  \
+                                   static_cast<int>(value))
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val,  \
+                          value2_name, value2_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name,         \
+                               value1_val, value2_name, value2_val)           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+
+// Sampling profiler events.
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name,       \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group,   \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val)
+
+// Legacy async events.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                 arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,     \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                 arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                      arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,          \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                      arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                        \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,            \
+                                   category_group, name, id, flags)
+
+// Legacy async events with explicit timestamps.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1(                           \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id,      \
+                                                timestamp, arg1_name,          \
+                                                arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,     \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                     timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(     \
+    category_group, name, id, timestamp, flags)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
+
+// Legacy async step into events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Legacy async step into events with timestamps.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+                                                    step, timestamp)          \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      "step", step)
+
+// Legacy async step past events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Legacy async end events.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,               \
+                                   category_group, name, id,                  \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+                               arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                           \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+                                    arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,          \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+                                    arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                      \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,            \
+                                   category_group, name, id, flags)
+
+// Legacy async end events with explicit timestamps.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                              timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val, \
+                                              arg2_name, arg2_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                                   timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \
+                                                        id, timestamp, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
+
+// Async events.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                          arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,     \
+                                   category_group, name, id,                   \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                          arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Async end events.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+                                        arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,     \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+                                        arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Async instant events.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id,        \
+                                            arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(                              \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(                       \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(                         \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,          \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+
+// Async events with explicit timestamps.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+                                                         id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+                                                       id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(                    \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,      \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(               \
+    category_group, name, id, timestamp)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                 \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Legacy flow events.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+                                   category_group, name, id,     \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN,               \
+                                   category_group, name, id,                   \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+                                arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id,                  \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+                                   category_group, name, id,     \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+                                     arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN,          \
+                                   category_group, name, id,              \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+                                     arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Legacy flow step events.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+                               arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id,              \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP,     \
+                                   category_group, name, id,        \
+                                   TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id,                   \
+      TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Legacy flow end events.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id)                        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id)      \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id,                                   \
+                                   TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val,   \
+                              arg2_name, arg2_val)                             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id)                   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name,        \
+                                   arg1_val)                                   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+                                   arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name,        \
+                                   arg1_val, arg2_name, arg2_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+                                   arg1_val, arg2_name, arg2_val)
+
+// Special strongly typed trace events.
+// TODO(skyostil): Migrate these to regular track event trace points.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+  if (false) {                                   \
+    base::ignore_result(run_function);           \
+    base::ignore_result(task);                   \
+  }
+
+#define TRACE_LOG_MESSAGE(file, message, line) \
+  if (false) {                                 \
+    base::ignore_result(file);                 \
+    base::ignore_result(message);              \
+    base::ignore_result(line);                 \
+  }
+
+// Metadata events.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Clock sync events.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id)                           \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata",     \
+                           "clock_sync", TRACE_EVENT_FLAG_NONE, "sync_id", \
+                           sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", issue_end_ts, \
+      TRACE_EVENT_FLAG_NONE, "sync_id", sync_id, "issue_ts", issue_ts)
+
+// Object events.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT,  \
+                                   category_group, name, id,         \
+                                   TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+                                            snapshot)                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, id,    \
+      TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP(                 \
+    category_group, name, id, timestamp, snapshot)                         \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, id,         \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT,  \
+                                   category_group, name, id,         \
+                                   TRACE_EVENT_FLAG_NONE)
+
+// Context events.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context)    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ENTER_CONTEXT, \
+                                   category_group, name, context,   \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context)    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_LEAVE_CONTEXT, \
+                                   category_group, name, context,   \
+                                   TRACE_EVENT_FLAG_NONE)
+
+// Macro to efficiently determine if a given category group is enabled.
+// TODO(skyostil): Implement.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+  do {                                                          \
+    *ret = false;                                               \
+  } while (0)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+// TODO(skyostil): Implement.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+  do {                                \
+    *ret = false;                     \
+  } while (0)
+
+// Time queries.
+#define TRACE_TIME_TICKS_NOW() INTERNAL_TRACE_TIME_TICKS_NOW()
+#define TRACE_TIME_NOW() INTERNAL_TRACE_TIME_NOW()
+
+// ----------------------------------------------------------------------------
+// Legacy tracing API (adapted from trace_event.h).
+// ----------------------------------------------------------------------------
+
+// We can implement the following subset of the legacy tracing API without
+// involvement from the embedder. APIs such as TraceId and
+// TRACE_EVENT_API_ADD_TRACE_EVENT are still up to the embedder to define.
+
+#define TRACE_STR_COPY(str) (str)
+
+// TODO(skyostil): Implement properly using CategoryRegistry.
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category) \
+  [&] {                                                      \
+    static uint8_t enabled;                                  \
+    TRACE_EVENT_CATEGORY_GROUP_ENABLED(category, &enabled);  \
+    return &enabled;                                         \
+  }()
+
+#endif  // PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
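A minimal usage sketch (not part of the patch) of the legacy flow macros defined
in this header, assuming an embedder built with PERFETTO_ENABLE_LEGACY_TRACE_EVENTS;
the "net" category, the function and |request_id| are hypothetical:

  #include <cstdint>

  #include "perfetto/tracing/track_event_legacy.h"

  void TraceRequestLifecycle(uint64_t request_id) {
    TRACE_EVENT_FLOW_BEGIN0("net", "Request", request_id);
    TRACE_EVENT_FLOW_STEP0("net", "Request", request_id, "dispatched");
    TRACE_EVENT_FLOW_END0("net", "Request", request_id);
  }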
diff --git a/src/base/test/utils.h b/src/base/test/utils.h
index 673362a..9f61eac 100644
--- a/src/base/test/utils.h
+++ b/src/base/test/utils.h
@@ -30,10 +30,20 @@
 
 #else  // PERFETTO_DCHECK_IS_ON()
 
+// Since PERFETTO_DCHECK_IS_ON() is false, these statements should not die (if
+// they were expected to, the test should use EXPECT/ASSERT_DEATH_IF_SUPPORTED
+// directly). Therefore, if the platform supports death tests, we use
+// GTEST_EXECUTE_STATEMENT_, which prevents the statement from being optimized
+// away; if not, we simply fall back on executing the statement directly.
+#if defined(GTEST_EXECUTE_STATEMENT_)
 #define EXPECT_DCHECK_DEATH(statement) \
     GTEST_EXECUTE_STATEMENT_(statement, "PERFETTO_CHECK")
 #define ASSERT_DCHECK_DEATH(statement) \
     GTEST_EXECUTE_STATEMENT_(statement, "PERFETTO_CHECK")
+#else
+#define EXPECT_DCHECK_DEATH(statement) [&]() { statement; }()
+#define ASSERT_DCHECK_DEATH(statement) [&]() { statement; }()
+#endif  //  defined(GTEST_EXECUTE_STATEMENT_)
 
 #endif  // PERFETTO_DCHECK_IS_ON()
 
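A brief sketch (not part of the patch) of how these macros read in a test;
|CheckedIncrement| is a hypothetical function that PERFETTO_DCHECKs its argument:

  #include "perfetto/base/logging.h"
  #include "src/base/test/utils.h"
  #include "test/gtest_and_gmock.h"

  int CheckedIncrement(int v) {
    PERFETTO_DCHECK(v >= 0);  // Compiled out when DCHECKs are off.
    return v + 1;
  }

  TEST(UtilsTest, DcheckDeath) {
    // Dies matching "PERFETTO_CHECK" when DCHECKs are on; otherwise the
    // statement is simply executed without dying.
    EXPECT_DCHECK_DEATH(CheckedIncrement(-1));
  }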
diff --git a/src/trace_processor/BUILD.gn b/src/trace_processor/BUILD.gn
index ebabeb3..97e7ed0 100644
--- a/src/trace_processor/BUILD.gn
+++ b/src/trace_processor/BUILD.gn
@@ -23,11 +23,13 @@
 
 # The library which eases processing of Perfetto traces by exposing reading
 # friendly APIs.
-static_library("trace_processor") {
-  complete_static_lib = true
-  deps = [
-    ":lib",
-  ]
+if (enable_perfetto_trace_processor_sqlite) {
+  static_library("trace_processor") {
+    complete_static_lib = true
+    deps = [
+      ":lib",
+    ]
+  }
 }
 
 if (enable_perfetto_ui) {
@@ -286,94 +288,98 @@
   }
 }
 
-source_set("lib") {
-  sources = [
-    "filtered_row_index.cc",
-    "filtered_row_index.h",
-    "read_trace.cc",
-    "row_iterators.cc",
-    "row_iterators.h",
-    "sched_slice_table.cc",
-    "sched_slice_table.h",
-    "span_join_operator_table.cc",
-    "span_join_operator_table.h",
-    "sql_stats_table.cc",
-    "sql_stats_table.h",
-    "sqlite_raw_table.cc",
-    "sqlite_raw_table.h",
-    "stats_table.cc",
-    "stats_table.h",
-    "storage_columns.cc",
-    "storage_columns.h",
-    "storage_schema.cc",
-    "storage_schema.h",
-    "storage_table.cc",
-    "storage_table.h",
-    "trace_processor.cc",
-    "trace_processor_impl.cc",
-    "trace_processor_impl.h",
-    "window_operator_table.cc",
-    "window_operator_table.h",
-  ]
+if (enable_perfetto_trace_processor_sqlite) {
+  source_set("lib") {
+    sources = [
+      "filtered_row_index.cc",
+      "filtered_row_index.h",
+      "read_trace.cc",
+      "row_iterators.cc",
+      "row_iterators.h",
+      "sched_slice_table.cc",
+      "sched_slice_table.h",
+      "span_join_operator_table.cc",
+      "span_join_operator_table.h",
+      "sql_stats_table.cc",
+      "sql_stats_table.h",
+      "sqlite_experimental_flamegraph_table.cc",
+      "sqlite_experimental_flamegraph_table.h",
+      "sqlite_raw_table.cc",
+      "sqlite_raw_table.h",
+      "stats_table.cc",
+      "stats_table.h",
+      "storage_columns.cc",
+      "storage_columns.h",
+      "storage_schema.cc",
+      "storage_schema.h",
+      "storage_table.cc",
+      "storage_table.h",
+      "trace_processor.cc",
+      "trace_processor_impl.cc",
+      "trace_processor_impl.h",
+      "window_operator_table.cc",
+      "window_operator_table.h",
+    ]
 
-  deps = [
-    ":storage_full",
-    "../../gn:default_deps",
-    "../../gn:sqlite",
-    "../../protos/perfetto/metrics:zero",
-    "../../protos/perfetto/metrics/android:zero",
-    "../../protos/perfetto/trace/ftrace:zero",
-    "../base",
-    "db:lib",
-    "metrics:lib",
-    "sqlite",
-    "tables",
-    "types",
-  ]
-  public_deps = [
-    "../../include/perfetto/trace_processor",
-  ]
-  if (enable_perfetto_trace_processor_json) {
-    deps += [ ":export_json" ]
+    deps = [
+      ":storage_full",
+      "../../gn:default_deps",
+      "../../gn:sqlite",
+      "../../protos/perfetto/metrics:zero",
+      "../../protos/perfetto/metrics/android:zero",
+      "../../protos/perfetto/trace/ftrace:zero",
+      "../base",
+      "db:lib",
+      "metrics:lib",
+      "sqlite",
+      "tables",
+      "types",
+    ]
+    public_deps = [
+      "../../include/perfetto/trace_processor",
+    ]
+    if (enable_perfetto_trace_processor_json) {
+      deps += [ ":export_json" ]
+    }
   }
-}
 
-perfetto_host_executable("trace_processor_shell") {
-  deps = [
-    ":lib",
-    "../../gn:default_deps",
-    "../../gn:protoc_lib",
-    "../../src/profiling/symbolizer",
-    "../../src/profiling/symbolizer:symbolize_database",
-    "../base",
-    "metrics:lib",
-  ]
-  if (enable_perfetto_version_gen) {
-    deps += [ "../../gn/standalone:gen_git_revision" ]
+  perfetto_host_executable("trace_processor_shell") {
+    deps = [
+      ":lib",
+      "../../gn:default_deps",
+      "../../gn:protoc_lib",
+      "../../src/profiling/symbolizer",
+      "../../src/profiling/symbolizer:symbolize_database",
+      "../base",
+      "metrics:lib",
+    ]
+    if (enable_perfetto_version_gen) {
+      deps += [ "../../gn/standalone:gen_git_revision" ]
+    }
+    if (enable_perfetto_trace_processor_linenoise) {
+      deps += [ "../../gn:linenoise" ]
+    }
+    if (enable_perfetto_trace_processor_httpd) {
+      deps += [ "rpc:httpd" ]
+    }
+    sources = [
+      "proto_to_json.cc",
+      "proto_to_json.h",
+      "trace_processor_shell.cc",
+    ]
   }
-  if (enable_perfetto_trace_processor_linenoise) {
-    deps += [ "../../gn:linenoise" ]
-  }
-  if (enable_perfetto_trace_processor_httpd) {
-    deps += [ "rpc:httpd" ]
-  }
-  sources = [
-    "proto_to_json.cc",
-    "proto_to_json.h",
-    "trace_processor_shell.cc",
-  ]
-}
+}  # if (enable_perfetto_trace_processor_sqlite)
 
 perfetto_unittest_source_set("unittests") {
   testonly = true
   sources = [
     "clock_tracker_unittest.cc",
     "event_tracker_unittest.cc",
-    "filtered_row_index_unittest.cc",
     "forwarding_trace_parser_unittest.cc",
     "ftrace_utils_unittest.cc",
     "heap_profile_tracker_unittest.cc",
     "importers/proto/args_table_utils_unittest.cc",
+    "importers/proto/heap_graph_tracker_unittest.cc",
     "importers/proto/heap_graph_walker_unittest.cc",
     "importers/proto/proto_trace_parser_unittest.cc",
     "importers/systrace/systrace_parser_unittest.cc",
@@ -381,18 +387,15 @@
     "protozero_to_text_unittests.cc",
     "sched_slice_table_unittest.cc",
     "slice_tracker_unittest.cc",
-    "span_join_operator_table_unittest.cc",
     "syscall_tracker_unittest.cc",
     "trace_sorter_unittest.cc",
   ]
   deps = [
     ":descriptors",
-    ":lib",
     ":protozero_to_text",
     ":storage_full",
     "../../gn:default_deps",
     "../../gn:gtest_and_gmock",
-    "../../gn:sqlite",
     "../../protos/perfetto/common:zero",
     "../../protos/perfetto/trace:minimal_zero",
     "../../protos/perfetto/trace:zero",
@@ -410,11 +413,22 @@
     "../protozero:testing_messages_zero",
     "containers:unittests",
     "db:unittests",
-    "sqlite",
-    "sqlite:unittests",
     "tables:unittests",
   ]
 
+  if (enable_perfetto_trace_processor_sqlite) {
+    sources += [
+      "filtered_row_index_unittest.cc",
+      "span_join_operator_table_unittest.cc",
+    ]
+    deps += [
+      ":lib",
+      "../../gn:sqlite",
+      "sqlite",
+      "sqlite:unittests",
+    ]
+  }
+
   if (enable_perfetto_trace_processor_json) {
     if (enable_perfetto_trace_processor_json_import) {
       sources += [
@@ -441,21 +455,22 @@
 
 source_set("integrationtests") {
   testonly = true
-  sources = [
-    "trace_database_integrationtest.cc",
-  ]
-  deps = [
-    ":lib",
-    ":storage_full",
-    "../../gn:default_deps",
-    "../../gn:gtest_and_gmock",
-    "../base",
-    "../base:test_support",
-    "sqlite",
-  ]
-
-  if (enable_perfetto_trace_processor_json_import) {
-    deps += [ "../../gn:jsoncpp" ]
+  sources = []
+  deps = []
+  if (enable_perfetto_trace_processor_sqlite) {
+    sources += [ "trace_database_integrationtest.cc" ]
+    deps += [
+      ":lib",
+      ":storage_full",
+      "../../gn:default_deps",
+      "../../gn:gtest_and_gmock",
+      "../base",
+      "../base:test_support",
+      "sqlite",
+    ]
+    if (enable_perfetto_trace_processor_json_import) {
+      deps += [ "../../gn:jsoncpp" ]
+    }
   }
 }
 
diff --git a/src/trace_processor/containers/bit_vector.cc b/src/trace_processor/containers/bit_vector.cc
index 0ec5bcc..a2308f3 100644
--- a/src/trace_processor/containers/bit_vector.cc
+++ b/src/trace_processor/containers/bit_vector.cc
@@ -54,19 +54,19 @@
   return SetBitsIterator(this);
 }
 
-void BitVector::UpdateSetBits(const BitVector& other) {
-  PERFETTO_DCHECK(other.size() == GetNumBitsSet());
+void BitVector::UpdateSetBits(const BitVector& o) {
+  PERFETTO_DCHECK(o.size() <= GetNumBitsSet());
 
-  // For each set bit in this bitvector, we lookup whether |other| has the
-  // bit set. If not, we clear the bit.
+  // For each set bit in this bitvector, we look up whether the corresponding
+  // bit in |o| (if in bounds) is set. If not, we clear the bit.
   for (auto it = IterateSetBits(); it; it.Next()) {
-    if (!other.IsSet(it.ordinal()))
+    if (it.ordinal() >= o.size() || !o.IsSet(it.ordinal()))
       it.Clear();
   }
 
   // After the loop, we should have precisely the same number of bits
   // set as |other|.
-  PERFETTO_DCHECK(GetNumBitsSet() == other.GetNumBitsSet());
+  PERFETTO_DCHECK(o.GetNumBitsSet() == GetNumBitsSet());
 }
 
 }  // namespace trace_processor
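To illustrate the relaxed contract (not part of the patch): the selector may now
be shorter than the number of set bits, and set bits whose ordinal falls outside
the selector are cleared:

  BitVector bv(8, false);
  bv.Set(1);
  bv.Set(3);
  bv.Set(6);                    // The set bits have ordinals 0, 1 and 2.

  BitVector selector(2, true);  // Covers ordinals 0 and 1 only.
  selector.Clear(0);            // Keep only ordinal 1.

  bv.UpdateSetBits(selector);
  // Ordinal 0 (bit 1) is cleared by the selector; ordinal 2 (bit 6) is out of
  // bounds and also cleared. Only bit 3 remains set.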
diff --git a/src/trace_processor/containers/bit_vector.h b/src/trace_processor/containers/bit_vector.h
index 58b9e23..5ef0f74 100644
--- a/src/trace_processor/containers/bit_vector.h
+++ b/src/trace_processor/containers/bit_vector.h
@@ -107,8 +107,8 @@
     PERFETTO_DCHECK(it != counts_.begin());
 
     // Go back one block to find the block which has the bit we are looking for.
-    uint16_t block_idx =
-        static_cast<uint16_t>(std::distance(counts_.begin(), it) - 1);
+    uint32_t block_idx =
+        static_cast<uint32_t>(std::distance(counts_.begin(), it) - 1);
 
     // Figure out how many set bits forward we are looking inside the block
     // by taking away the number of bits at the start of the block from n.
diff --git a/src/trace_processor/containers/bit_vector_unittest.cc b/src/trace_processor/containers/bit_vector_unittest.cc
index e185afd..b8e7ebb 100644
--- a/src/trace_processor/containers/bit_vector_unittest.cc
+++ b/src/trace_processor/containers/bit_vector_unittest.cc
@@ -237,6 +237,22 @@
   ASSERT_TRUE(bv.IsSet(4));
 }
 
+TEST(BitVectorUnittest, UpdateSetBitsSmallerPicker) {
+  BitVector bv(6, false);
+  bv.Set(1);
+  bv.Set(2);
+  bv.Set(4);
+
+  BitVector picker(2u, true);
+  picker.Clear(1);
+
+  bv.UpdateSetBits(picker);
+
+  ASSERT_TRUE(bv.IsSet(1));
+  ASSERT_FALSE(bv.IsSet(2));
+  ASSERT_FALSE(bv.IsSet(4));
+}
+
 TEST(BitVectorUnittest, IterateAllBitsConst) {
   BitVector bv;
   for (uint32_t i = 0; i < 12345; ++i) {
diff --git a/src/trace_processor/containers/row_map.cc b/src/trace_processor/containers/row_map.cc
index b10d0cb..0a3def0 100644
--- a/src/trace_processor/containers/row_map.cc
+++ b/src/trace_processor/containers/row_map.cc
@@ -36,9 +36,9 @@
                          uint32_t end,
                          const BitVector& selector) {
   PERFETTO_DCHECK(start <= end);
-  PERFETTO_DCHECK(end - start == selector.size());
+  PERFETTO_DCHECK(selector.size() <= end - start);
 
-  // If |start| == 0 and |end - start| == |selector.size()| (which is a
+  // If |start| == 0 and |selector.size()| <= |end - start| (which is a
   // precondition for this function), the BitVector we generate is going to be
   // exactly |selector|.
   //
@@ -49,8 +49,11 @@
   if (start == 0u)
     return RowMap(selector.Copy());
 
+  // We only need to resize to |start| + |selector.size()| as we know any rows
+  // not covered by |selector| are going to be removed below.
   BitVector bv(start, false);
-  bv.Resize(end, true);
+  bv.Resize(start + selector.size(), true);
+
   bv.UpdateSetBits(selector);
   return RowMap(std::move(bv));
 }
diff --git a/src/trace_processor/containers/row_map_unittest.cc b/src/trace_processor/containers/row_map_unittest.cc
index c85b5fe..bd92ae0 100644
--- a/src/trace_processor/containers/row_map_unittest.cc
+++ b/src/trace_processor/containers/row_map_unittest.cc
@@ -195,6 +195,15 @@
   ASSERT_EQ(res.Get(1u), 30u);
 }
 
+TEST(RowMapUnittest, SelectRangeWithSmallBitVector) {
+  RowMap rm(27, 31);
+  RowMap picker(BitVector{false, true});
+  auto res = rm.SelectRows(picker);
+
+  ASSERT_EQ(res.size(), 1u);
+  ASSERT_EQ(res.Get(0u), 28u);
+}
+
 TEST(RowMapUnittest, SelectBitVectorWithBitVector) {
   RowMap rm(BitVector{true, false, true, true, false, true});
   RowMap picker(BitVector{true, false, false, true});
@@ -205,6 +214,15 @@
   ASSERT_EQ(res.Get(1u), 5u);
 }
 
+TEST(RowMapUnittest, SelectBitVectorWithSmallBitVector) {
+  RowMap rm(BitVector{true, false, true, true, false, true});
+  RowMap picker(BitVector{false, true});
+  auto res = rm.SelectRows(picker);
+
+  ASSERT_EQ(res.size(), 1u);
+  ASSERT_EQ(res.Get(0u), 2u);
+}
+
 TEST(RowMapUnittest, SelectIndexVectorWithBitVector) {
   RowMap rm(std::vector<uint32_t>{0u, 2u, 3u, 5u});
   RowMap picker(BitVector{true, false, false, true});
diff --git a/src/trace_processor/containers/string_pool.cc b/src/trace_processor/containers/string_pool.cc
index ac45ceb..fd65195 100644
--- a/src/trace_processor/containers/string_pool.cc
+++ b/src/trace_processor/containers/string_pool.cc
@@ -53,7 +53,7 @@
 
 StringPool::~StringPool() = default;
 
-StringPool::StringPool(StringPool&&) noexcept = default;
+StringPool::StringPool(StringPool&&) = default;
 StringPool& StringPool::operator=(StringPool&&) = default;
 
 StringPool::Id StringPool::InsertString(base::StringView str, uint64_t hash) {
diff --git a/src/trace_processor/containers/string_pool.h b/src/trace_processor/containers/string_pool.h
index 154bfb2..11ae91c 100644
--- a/src/trace_processor/containers/string_pool.h
+++ b/src/trace_processor/containers/string_pool.h
@@ -105,7 +105,7 @@
   ~StringPool();
 
   // Allow std::move().
-  StringPool(StringPool&&) noexcept;
+  StringPool(StringPool&&);
   StringPool& operator=(StringPool&&);
 
   // Disable implicit copy.
diff --git a/src/trace_processor/db/typed_column.h b/src/trace_processor/db/typed_column.h
index 9a3dd5e..5be1dcc 100644
--- a/src/trace_processor/db/typed_column.h
+++ b/src/trace_processor/db/typed_column.h
@@ -67,6 +67,13 @@
     return Column::IndexOf(NumericToSqlValue(v));
   }
 
+  std::vector<T> ToVectorForTesting() const {
+    std::vector<T> result(row_map().size());
+    for (uint32_t i = 0; i < row_map().size(); ++i)
+      result[i] = (*this)[i];
+    return result;
+  }
+
   // Implements equality between two items of type |T|.
   static bool Equals(T a, T b) {
     // We need to use equal_to here as it could be T == double and because we
@@ -104,6 +111,13 @@
   // Inserts the value at the end of the column.
   void Append(base::Optional<T> v) { mutable_sparse_vector<T>()->Append(v); }
 
+  std::vector<base::Optional<T>> ToVectorForTesting() const {
+    std::vector<base::Optional<T>> result(row_map().size());
+    for (uint32_t i = 0; i < row_map().size(); ++i)
+      result[i] = (*this)[i];
+    return result;
+  }
+
   // Implements equality between two items of type |T|.
   static bool Equals(base::Optional<T> a, base::Optional<T> b) {
     // We need to use equal_to here as it could be T == double and because we
diff --git a/src/trace_processor/heap_profile_tracker.cc b/src/trace_processor/heap_profile_tracker.cc
index a666dad..4bf0756 100644
--- a/src/trace_processor/heap_profile_tracker.cc
+++ b/src/trace_processor/heap_profile_tracker.cc
@@ -45,23 +45,23 @@
     const SourceAllocation& alloc,
     const StackProfileTracker::InternLookup* intern_lookup) {
   SequenceState& sequence_state = sequence_state_[seq_id];
-  auto maybe_callstack_id =
-      stack_profile_tracker->FindCallstack(alloc.callstack_id, intern_lookup);
+  auto maybe_callstack_id = stack_profile_tracker->FindOrInsertCallstack(
+      alloc.callstack_id, intern_lookup);
   if (!maybe_callstack_id)
     return;
 
-  int64_t callstack_id = *maybe_callstack_id;
+  CallsiteId callstack_id = *maybe_callstack_id;
 
   UniquePid upid = context_->process_tracker->GetOrCreateProcess(
       static_cast<uint32_t>(alloc.pid));
 
   tables::HeapProfileAllocationTable::Row alloc_row{
-      alloc.timestamp, upid, callstack_id,
+      alloc.timestamp, upid, callstack_id.value,
       static_cast<int64_t>(alloc.alloc_count),
       static_cast<int64_t>(alloc.self_allocated)};
 
   tables::HeapProfileAllocationTable::Row free_row{
-      alloc.timestamp, upid, callstack_id,
+      alloc.timestamp, upid, callstack_id.value,
       -static_cast<int64_t>(alloc.free_count),
       -static_cast<int64_t>(alloc.self_freed)};
 
diff --git a/src/trace_processor/heap_profile_tracker.h b/src/trace_processor/heap_profile_tracker.h
index 1d121aa..93a2440 100644
--- a/src/trace_processor/heap_profile_tracker.h
+++ b/src/trace_processor/heap_profile_tracker.h
@@ -77,10 +77,10 @@
   struct SequenceState {
     std::vector<SourceAllocation> pending_allocs;
 
-    std::unordered_map<std::pair<UniquePid, int64_t>,
+    std::unordered_map<std::pair<UniquePid, CallsiteId>,
                        tables::HeapProfileAllocationTable::Row>
         prev_alloc;
-    std::unordered_map<std::pair<UniquePid, int64_t>,
+    std::unordered_map<std::pair<UniquePid, CallsiteId>,
                        tables::HeapProfileAllocationTable::Row>
         prev_free;
 
diff --git a/src/trace_processor/heap_profile_tracker_unittest.cc b/src/trace_processor/heap_profile_tracker_unittest.cc
index 91c3f93..d9d3e1d 100644
--- a/src/trace_processor/heap_profile_tracker_unittest.cc
+++ b/src/trace_processor/heap_profile_tracker_unittest.cc
@@ -181,11 +181,11 @@
 int64_t FindCallstack(const TraceStorage& storage,
                       int64_t depth,
                       int64_t parent,
-                      int64_t frame_id) {
+                      FrameId frame_id) {
   const auto& callsites = storage.stack_profile_callsite_table();
   for (uint32_t i = 0; i < callsites.row_count(); ++i) {
     if (callsites.depth()[i] == depth && callsites.parent_id()[i] == parent &&
-        callsites.frame_id()[i] == frame_id) {
+        callsites.frame_id()[i] == frame_id.value) {
       return static_cast<int64_t>(i);
     }
   }
@@ -327,7 +327,6 @@
     const StackProfileTracker::SourceCallstack& callstack = callstacks[i];
     for (size_t depth = 0; depth < callstack.size(); ++depth) {
       auto frame_id = spt->GetDatabaseFrameIdForTesting(callstack[depth]);
-      ASSERT_NE(frame_id, -1);
       int64_t self = FindCallstack(
           *context.storage, static_cast<int64_t>(depth), parent, frame_id);
       ASSERT_NE(self, -1);
diff --git a/src/trace_processor/importers/proto/heap_graph_module.cc b/src/trace_processor/importers/proto/heap_graph_module.cc
index 49d2f02..c75a340 100644
--- a/src/trace_processor/importers/proto/heap_graph_module.cc
+++ b/src/trace_processor/importers/proto/heap_graph_module.cc
@@ -92,7 +92,7 @@
 using perfetto::protos::pbzero::TracePacket;
 
 HeapGraphModule::HeapGraphModule(TraceProcessorContext* context)
-    : context_(context), heap_graph_tracker_(context) {
+    : context_(context) {
   RegisterForField(TracePacket::kHeapGraphFieldNumber, context);
   RegisterForField(TracePacket::kDeobfuscationMappingFieldNumber, context);
 }
@@ -115,10 +115,11 @@
 void HeapGraphModule::ParseHeapGraph(uint32_t seq_id,
                                      int64_t ts,
                                      protozero::ConstBytes blob) {
+  auto* heap_graph_tracker = HeapGraphTracker::GetOrCreate(context_);
   protos::pbzero::HeapGraph::Decoder heap_graph(blob.data, blob.size);
   UniquePid upid = context_->process_tracker->GetOrCreateProcess(
       static_cast<uint32_t>(heap_graph.pid()));
-  heap_graph_tracker_.SetPacketIndex(seq_id, heap_graph.index());
+  heap_graph_tracker->SetPacketIndex(seq_id, heap_graph.index());
   for (auto it = heap_graph.objects(); it; ++it) {
     protos::pbzero::HeapGraphObject::Decoder object(*it);
     HeapGraphTracker::SourceObject obj;
@@ -156,14 +157,14 @@
       ref.owned_object_id = object_ids[i];
       obj.references.emplace_back(std::move(ref));
     }
-    heap_graph_tracker_.AddObject(seq_id, upid, ts, std::move(obj));
+    heap_graph_tracker->AddObject(seq_id, upid, ts, std::move(obj));
   }
   for (auto it = heap_graph.type_names(); it; ++it) {
     protos::pbzero::InternedString::Decoder entry(*it);
     const char* str = reinterpret_cast<const char*>(entry.str().data);
     auto str_view = base::StringView(str, entry.str().size);
 
-    heap_graph_tracker_.AddInternedTypeName(
+    heap_graph_tracker->AddInternedTypeName(
         seq_id, entry.iid(), context_->storage->InternString(str_view));
   }
   for (auto it = heap_graph.field_names(); it; ++it) {
@@ -171,7 +172,7 @@
     const char* str = reinterpret_cast<const char*>(entry.str().data);
     auto str_view = base::StringView(str, entry.str().size);
 
-    heap_graph_tracker_.AddInternedFieldName(
+    heap_graph_tracker->AddInternedFieldName(
         seq_id, entry.iid(), context_->storage->InternString(str_view));
   }
   for (auto it = heap_graph.roots(); it; ++it) {
@@ -191,15 +192,16 @@
           stats::heap_graph_malformed_packet, static_cast<int>(upid));
       break;
     }
-    heap_graph_tracker_.AddRoot(seq_id, upid, ts, std::move(src_root));
+    heap_graph_tracker->AddRoot(seq_id, upid, ts, std::move(src_root));
   }
   if (!heap_graph.continued()) {
-    heap_graph_tracker_.FinalizeProfile(seq_id);
+    heap_graph_tracker->FinalizeProfile(seq_id);
   }
 }
 
 void HeapGraphModule::ParseDeobfuscationMapping(protozero::ConstBytes blob) {
   // TODO(fmayer): Support multiple profiles in the same trace.
+  auto* heap_graph_tracker = HeapGraphTracker::GetOrCreate(context_);
   protos::pbzero::DeobfuscationMapping::Decoder deobfuscation_mapping(
       blob.data, blob.size);
   for (auto class_it = deobfuscation_mapping.obfuscated_classes(); class_it;
@@ -212,7 +214,7 @@
                     cls.obfuscated_name().ToStdString().c_str());
     } else {
       const std::vector<int64_t>* cls_objects =
-          heap_graph_tracker_.RowsForType(*obfuscated_class_name_id);
+          heap_graph_tracker->RowsForType(*obfuscated_class_name_id);
 
       if (cls_objects) {
         auto interned_deobfuscated_name =
@@ -245,7 +247,7 @@
       }
 
       const std::vector<int64_t>* field_references =
-          heap_graph_tracker_.RowsForField(*obfuscated_field_name_id);
+          heap_graph_tracker->RowsForField(*obfuscated_field_name_id);
       if (field_references) {
         auto interned_deobfuscated_name = context_->storage->InternString(
             base::StringView(merged_deobfuscated));
diff --git a/src/trace_processor/importers/proto/heap_graph_module.h b/src/trace_processor/importers/proto/heap_graph_module.h
index 492ba60..2fde80e 100644
--- a/src/trace_processor/importers/proto/heap_graph_module.h
+++ b/src/trace_processor/importers/proto/heap_graph_module.h
@@ -40,7 +40,6 @@
   void ParseDeobfuscationMapping(protozero::ConstBytes);
 
   TraceProcessorContext* context_;
-  HeapGraphTracker heap_graph_tracker_;
 };
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.cc b/src/trace_processor/importers/proto/heap_graph_tracker.cc
index 3c1001f..124fab9 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.cc
@@ -187,25 +187,29 @@
     }
   }
 
-  auto* mapping_table =
-      context_->storage->mutable_stack_profile_mapping_table();
-
-  tables::StackProfileMappingTable::Row mapping_row{};
-  mapping_row.name = context_->storage->InternString("JAVA");
-  MappingId mapping_id = mapping_table->Insert(mapping_row);
-
-  uint32_t mapping_idx = *mapping_table->id().IndexOf(mapping_id);
-
   auto paths = sequence_state.walker.FindPathsFromRoot();
-  WriteFlamegraph(sequence_state, paths, mapping_idx);
+  walkers_.emplace(
+      std::make_pair(sequence_state.current_upid, sequence_state.current_ts),
+      std::move(sequence_state.walker));
 
   sequence_state_.erase(seq_id);
 }
 
-void HeapGraphTracker::WriteFlamegraph(
-    const SequenceState& sequence_state,
-    const HeapGraphWalker::PathFromRoot& init_path,
-    uint32_t mapping_row) {
+std::unique_ptr<tables::ExperimentalFlamegraphNodesTable>
+HeapGraphTracker::BuildFlamegraph(const int64_t current_ts,
+                                  const UniquePid current_upid) {
+  auto it = walkers_.find(std::make_pair(current_upid, current_ts));
+  if (it == walkers_.end())
+    return nullptr;
+
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> tbl(
+      new tables::ExperimentalFlamegraphNodesTable(
+          context_->storage->mutable_string_pool(), nullptr));
+
+  HeapGraphWalker::PathFromRoot init_path = it->second.FindPathsFromRoot();
+  auto profile_type = context_->storage->InternString("graph");
+  auto java_mapping = context_->storage->InternString("JAVA");
+
   std::vector<int32_t> node_to_cumulative_size(init_path.nodes.size());
   std::vector<int32_t> node_to_cumulative_count(init_path.nodes.size());
   // i > 0 is to skip the artifical root node.
@@ -218,41 +222,32 @@
     node_to_cumulative_count[node.parent_id] += node_to_cumulative_count[i];
   }
 
-  std::vector<int32_t> node_to_row_id(init_path.nodes.size());
-  node_to_row_id[0] = -1;  // We use parent_id -1 for roots.
+  std::vector<uint32_t> node_to_row_idx(init_path.nodes.size());
   // i = 1 is to skip the artifical root node.
   for (size_t i = 1; i < init_path.nodes.size(); ++i) {
     const HeapGraphWalker::PathFromRoot::Node& node = init_path.nodes[i];
     PERFETTO_CHECK(node.parent_id < i);
-    const int32_t parent_row_id = node_to_row_id[node.parent_id];
+    base::Optional<uint32_t> parent_id;
+    if (node.parent_id != 0)
+      parent_id = node_to_row_idx[node.parent_id];
     const uint32_t depth = node.depth - 1;  // -1 because we do not have the
                                             // artificial root in the database.
 
-    tables::StackProfileFrameTable::Row row{};
-    PERFETTO_CHECK(node.class_name > 0);
-    row.name = StringId::Raw(static_cast<uint32_t>(node.class_name));
-    row.mapping = mapping_row;
-
-    auto id =
-        context_->storage->mutable_stack_profile_frame_table()->Insert(row);
-    int32_t frame_id = static_cast<int32_t>(id.value);
-
-    auto* callsites = context_->storage->mutable_stack_profile_callsite_table();
-    auto callsite_id = callsites->Insert({depth, parent_row_id, frame_id});
-    int32_t row_id = static_cast<int32_t>(callsite_id.value);
-    node_to_row_id[i] = row_id;
-
     tables::ExperimentalFlamegraphNodesTable::Row alloc_row{
-        sequence_state.current_ts,
-        sequence_state.current_upid,
-        row_id,
+        current_ts,
+        current_upid,
+        profile_type,
+        depth,
+        StringId::Raw(static_cast<uint32_t>(node.class_name)),
+        java_mapping,
         static_cast<int64_t>(node.count),
         static_cast<int64_t>(node_to_cumulative_count[i]),
         static_cast<int64_t>(node.size),
-        static_cast<int64_t>(node_to_cumulative_size[i])};
-    context_->storage->mutable_experimental_flamegraph_nodes_table()->Insert(
-        alloc_row);
+        static_cast<int64_t>(node_to_cumulative_size[i]),
+        parent_id};
+    node_to_row_idx[i] = *tbl->id().IndexOf(tbl->Insert(alloc_row));
   }
+  return tbl;
 }
 
 void HeapGraphTracker::MarkReachable(int64_t row) {
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.h b/src/trace_processor/importers/proto/heap_graph_tracker.h
index 71bbb8f..211b924 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.h
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.h
@@ -32,7 +32,7 @@
 
 class TraceProcessorContext;
 
-class HeapGraphTracker : public HeapGraphWalker::Delegate {
+class HeapGraphTracker : public HeapGraphWalker::Delegate, public Destructible {
  public:
   struct SourceObject {
     // All ids in this are in the trace iid space, not in the trace processor
@@ -54,6 +54,13 @@
 
   explicit HeapGraphTracker(TraceProcessorContext* context);
 
+  static HeapGraphTracker* GetOrCreate(TraceProcessorContext* context) {
+    if (!context->heap_graph_tracker) {
+      context->heap_graph_tracker.reset(new HeapGraphTracker(context));
+    }
+    return static_cast<HeapGraphTracker*>(context->heap_graph_tracker.get());
+  }
+
   void AddRoot(uint32_t seq_id, UniquePid upid, int64_t ts, SourceRoot root);
   void AddObject(uint32_t seq_id, UniquePid upid, int64_t ts, SourceObject obj);
   void AddInternedTypeName(uint32_t seq_id,
@@ -86,6 +93,10 @@
     return &it->second;
   }
 
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> BuildFlamegraph(
+      const int64_t current_ts,
+      const UniquePid current_upid);
+
  private:
   struct SequenceState {
     SequenceState(HeapGraphTracker* tracker) : walker(tracker) {}
@@ -104,12 +115,10 @@
   SequenceState& GetOrCreateSequence(uint32_t seq_id);
   bool SetPidAndTimestamp(SequenceState* seq, UniquePid upid, int64_t ts);
 
-  void WriteFlamegraph(const SequenceState& sequence_state,
-                       const HeapGraphWalker::PathFromRoot& path,
-                       uint32_t mapping_row);
 
   TraceProcessorContext* const context_;
   std::map<uint32_t, SequenceState> sequence_state_;
+  std::map<std::pair<UniquePid, int64_t /* ts */>, HeapGraphWalker> walkers_;
 
   std::map<StringPool::Id, std::vector<int64_t>> class_to_rows_;
   std::map<StringPool::Id, std::vector<int64_t>> field_to_rows_;
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
new file mode 100644
index 0000000..3cb5c2a
--- /dev/null
+++ b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/importers/proto/heap_graph_tracker.h"
+
+#include "perfetto/base/logging.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+namespace {
+
+using ::testing::UnorderedElementsAre;
+
+TEST(HeapGraphTrackerTest, BuildFlamegraph) {
+  //           4@A 5@B
+  //             \ /
+  //         2@Y 3@Y
+  //           \ /
+  //           1@X
+
+  constexpr uint64_t kSeqId = 1;
+  constexpr UniquePid kPid = 1;
+  constexpr int64_t kTimestamp = 1;
+
+  TraceProcessorContext context;
+  context.storage.reset(new TraceStorage());
+
+  HeapGraphTracker tracker(&context);
+
+  constexpr uint64_t kField = 1;
+
+  constexpr uint64_t kX = 1;
+  constexpr uint64_t kY = 2;
+  constexpr uint64_t kA = 3;
+  constexpr uint64_t kB = 4;
+
+  StringPool::Id field = context.storage->InternString("foo");
+  StringPool::Id x = context.storage->InternString("X");
+  StringPool::Id y = context.storage->InternString("Y");
+  StringPool::Id a = context.storage->InternString("A");
+  StringPool::Id b = context.storage->InternString("B");
+
+  tracker.AddInternedFieldName(kSeqId, kField, field);
+
+  tracker.AddInternedTypeName(kSeqId, kX, x);
+  tracker.AddInternedTypeName(kSeqId, kY, y);
+  tracker.AddInternedTypeName(kSeqId, kA, a);
+  tracker.AddInternedTypeName(kSeqId, kB, b);
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 1;
+    obj.self_size = 1;
+    obj.type_id = kX;
+    HeapGraphTracker::SourceObject::Reference ref;
+    ref.field_name_id = kField;
+    ref.owned_object_id = 2;
+    obj.references.emplace_back(std::move(ref));
+
+    ref.field_name_id = kField;
+    ref.owned_object_id = 3;
+    obj.references.emplace_back(std::move(ref));
+
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 2;
+    obj.self_size = 2;
+    obj.type_id = kY;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 3;
+    obj.self_size = 3;
+    obj.type_id = kY;
+    HeapGraphTracker::SourceObject::Reference ref;
+    ref.field_name_id = kField;
+    ref.owned_object_id = 4;
+    obj.references.emplace_back(std::move(ref));
+
+    ref.field_name_id = kField;
+    ref.owned_object_id = 5;
+    obj.references.emplace_back(std::move(ref));
+
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 4;
+    obj.self_size = 4;
+    obj.type_id = kA;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 5;
+    obj.self_size = 5;
+    obj.type_id = kB;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  HeapGraphTracker::SourceRoot root;
+  root.root_type = context.storage->InternString("ROOT");
+  root.object_ids.emplace_back(1);
+  tracker.AddRoot(kSeqId, kPid, kTimestamp, root);
+
+  tracker.FinalizeProfile(kSeqId);
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> flame =
+      tracker.BuildFlamegraph(kTimestamp, kPid);
+  ASSERT_NE(flame, nullptr);
+
+  auto cumulative_sizes = flame->cumulative_size().ToVectorForTesting();
+  EXPECT_THAT(cumulative_sizes, UnorderedElementsAre(15, 4, 14, 5));
+
+  auto cumulative_counts = flame->cumulative_count().ToVectorForTesting();
+  EXPECT_THAT(cumulative_counts, UnorderedElementsAre(5, 4, 1, 1));
+
+  auto sizes = flame->size().ToVectorForTesting();
+  EXPECT_THAT(sizes, UnorderedElementsAre(1, 5, 4, 5));
+
+  auto counts = flame->count().ToVectorForTesting();
+  EXPECT_THAT(counts, UnorderedElementsAre(1, 2, 1, 1));
+}
+
+}  // namespace
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/proto/packet_sequence_state.h b/src/trace_processor/importers/proto/packet_sequence_state.h
index 903f99d..81ec6e0 100644
--- a/src/trace_processor/importers/proto/packet_sequence_state.h
+++ b/src/trace_processor/importers/proto/packet_sequence_state.h
@@ -47,7 +47,7 @@
  public:
   InternedMessageView(TraceBlobView msg) : message_(std::move(msg)) {}
 
-  InternedMessageView(InternedMessageView&&) noexcept = default;
+  InternedMessageView(InternedMessageView&&) = default;
   InternedMessageView& operator=(InternedMessageView&&) = default;
 
   // Allow copy by cloning the TraceBlobView. This is required for
diff --git a/src/trace_processor/importers/proto/proto_trace_parser.cc b/src/trace_processor/importers/proto/proto_trace_parser.cc
index b2b852f..4c5566d 100644
--- a/src/trace_processor/importers/proto/proto_trace_parser.cc
+++ b/src/trace_processor/importers/proto/proto_trace_parser.cc
@@ -445,15 +445,15 @@
       break;
     }
 
-    auto maybe_callstack_id =
-        stack_profile_tracker.FindCallstack(*callstack_it, &intern_lookup);
+    auto maybe_callstack_id = stack_profile_tracker.FindOrInsertCallstack(
+        *callstack_it, &intern_lookup);
     if (!maybe_callstack_id) {
       context_->storage->IncrementStats(stats::stackprofile_parser_error);
       PERFETTO_ELOG("StreamingProfilePacket referencing invalid callstack!");
       continue;
     }
 
-    int64_t callstack_id = *maybe_callstack_id;
+    uint32_t callstack_id = maybe_callstack_id->value;
 
     tables::CpuProfileStackSampleTable::Row sample_row{
         sequence_state->state()->IncrementAndGetTrackEventTimeNs(*timestamp_it *
@@ -631,10 +631,10 @@
   protos::pbzero::ModuleSymbols::Decoder module_symbols(blob.data, blob.size);
   std::string hex_build_id = base::ToHex(module_symbols.build_id().data,
                                          module_symbols.build_id().size);
-  auto mapping_rows = context_->storage->FindMappingRow(
+  auto mapping_ids = context_->storage->FindMappingRow(
       context_->storage->InternString(module_symbols.path()),
       context_->storage->InternString(base::StringView(hex_build_id)));
-  if (mapping_rows.empty()) {
+  if (mapping_ids.empty()) {
     context_->storage->IncrementStats(stats::stackprofile_invalid_mapping_id);
     return;
   }
@@ -643,16 +643,14 @@
 
     uint32_t symbol_set_id = context_->storage->symbol_table().row_count();
     bool frame_found = false;
-    for (int64_t mapping_row : mapping_rows) {
-      std::vector<int64_t> frame_rows = context_->storage->FindFrameRow(
-          static_cast<size_t>(mapping_row), address_symbols.address());
+    for (MappingId mapping_id : mapping_ids) {
+      std::vector<FrameId> frame_ids = context_->storage->FindFrameIds(
+          mapping_id, address_symbols.address());
 
-      for (const int64_t frame_row : frame_rows) {
-        PERFETTO_DCHECK(frame_row >= 0);
-
-        uint32_t row_idx = static_cast<uint32_t>(frame_row);
+      for (const FrameId frame_id : frame_ids) {
         auto* frames = context_->storage->mutable_stack_profile_frame_table();
-        frames->mutable_symbol_set_id()->Set(row_idx, symbol_set_id);
+        uint32_t frame_row = *frames->id().IndexOf(frame_id);
+        frames->mutable_symbol_set_id()->Set(frame_row, symbol_set_id);
         frame_found = true;
       }
     }
diff --git a/src/trace_processor/importers/proto/track_event_parser.cc b/src/trace_processor/importers/proto/track_event_parser.cc
index cb88ec7..2ca1a28 100644
--- a/src/trace_processor/importers/proto/track_event_parser.cc
+++ b/src/trace_processor/importers/proto/track_event_parser.cc
@@ -457,8 +457,12 @@
   base::Optional<UniqueTid> upid;
 
   // Determine track from track_uuid specified in either TrackEvent or
-  // TrackEventDefaults. If none is set, fall back to the track specified by the
-  // sequence's (or event's) pid + tid or a default track.
+  // TrackEventDefaults. If no track_uuid is set, we either:
+  //   a) fall back to the track specified by the sequence's (or event's) pid +
+  //      tid (only for legacy tracks/events, i.e. events that don't specify an
+  //      explicit track uuid or that use legacy event phases instead of
+  //      TrackEvent types), or
+  //   b) use a default track.
   if (track_uuid) {
     base::Optional<TrackId> opt_track_id =
         track_tracker->GetDescriptorTrack(track_uuid);
@@ -480,9 +484,10 @@
       if (process_track_row)
         upid = storage->process_track_table().upid()[*process_track_row];
     }
-  } else if (sequence_state->state()->pid_and_tid_valid() ||
-             (legacy_event.has_pid_override() &&
-              legacy_event.has_tid_override())) {
+  } else if ((!event.has_track_uuid() || !event.has_type()) &&
+             (sequence_state->state()->pid_and_tid_valid() ||
+              (legacy_event.has_pid_override() &&
+               legacy_event.has_tid_override()))) {
     uint32_t pid = static_cast<uint32_t>(sequence_state->state()->pid());
     uint32_t tid = static_cast<uint32_t>(sequence_state->state()->tid());
     if (legacy_event.has_pid_override())
diff --git a/src/trace_processor/importers/systrace/systrace_trace_parser.cc b/src/trace_processor/importers/systrace/systrace_trace_parser.cc
index 2ffaac4..d23581c 100644
--- a/src/trace_processor/importers/systrace/systrace_trace_parser.cc
+++ b/src/trace_processor/importers/systrace/systrace_trace_parser.cc
@@ -27,6 +27,7 @@
 #include "src/trace_processor/track_tracker.h"
 
 #include <inttypes.h>
+#include <cctype>
 #include <string>
 #include <unordered_map>
 
@@ -37,9 +38,9 @@
 std::string SubstrTrim(const std::string& input) {
   std::string s = input;
   s.erase(s.begin(), std::find_if(s.begin(), s.end(),
-                                  [](int ch) { return !std::isspace(ch); }));
+                                  [](char ch) { return !std::isspace(ch); }));
   s.erase(std::find_if(s.rbegin(), s.rend(),
-                       [](int ch) { return !std::isspace(ch); })
+                       [](char ch) { return !std::isspace(ch); })
               .base(),
           s.end());
   return s;
diff --git a/src/trace_processor/metrics/BUILD.gn b/src/trace_processor/metrics/BUILD.gn
index 537f55e..0d2db64 100644
--- a/src/trace_processor/metrics/BUILD.gn
+++ b/src/trace_processor/metrics/BUILD.gn
@@ -62,40 +62,42 @@
   public_configs = [ ":gen_config" ]
 }
 
-source_set("lib") {
-  sources = [
-    "metrics.cc",
-    "metrics.descriptor.h",
-    "metrics.h",
-  ]
-  deps = [
-    "../../../gn:default_deps",
-    "../../../gn:sqlite",
-    "../../../include/perfetto/trace_processor",
-    "../../../protos/perfetto/common:zero",
-    "../../../protos/perfetto/metrics:zero",
-    "../../../protos/perfetto/metrics/android:zero",
-    "../../../protos/perfetto/trace_processor:metrics_impl_zero",
-    "../../base",
-    "../../protozero:protozero",
-    "../sqlite",
-  ]
-  public_deps = [
-    ":gen_merged_sql_metrics",
-    "../../trace_processor:descriptors",
-  ]
-}
+if (enable_perfetto_trace_processor_sqlite) {
+  source_set("lib") {
+    sources = [
+      "metrics.cc",
+      "metrics.descriptor.h",
+      "metrics.h",
+    ]
+    deps = [
+      "../../../gn:default_deps",
+      "../../../gn:sqlite",
+      "../../../include/perfetto/trace_processor",
+      "../../../protos/perfetto/common:zero",
+      "../../../protos/perfetto/metrics:zero",
+      "../../../protos/perfetto/metrics/android:zero",
+      "../../../protos/perfetto/trace_processor:metrics_impl_zero",
+      "../../base",
+      "../../protozero:protozero",
+      "../sqlite",
+    ]
+    public_deps = [
+      ":gen_merged_sql_metrics",
+      "../../trace_processor:descriptors",
+    ]
+  }
 
-perfetto_unittest_source_set("unittests") {
-  testonly = true
-  sources = [
-    "metrics_unittest.cc",
-  ]
-  deps = [
-    ":lib",
-    "../../../gn:default_deps",
-    "../../../gn:gtest_and_gmock",
-    "../../../gn:sqlite",
-    "../../../protos/perfetto/common:zero",
-  ]
+  perfetto_unittest_source_set("unittests") {
+    testonly = true
+    sources = [
+      "metrics_unittest.cc",
+    ]
+    deps = [
+      ":lib",
+      "../../../gn:default_deps",
+      "../../../gn:gtest_and_gmock",
+      "../../../gn:sqlite",
+      "../../../protos/perfetto/common:zero",
+    ]
+  }
 }
diff --git a/src/trace_processor/slice_tracker.cc b/src/trace_processor/slice_tracker.cc
index a9323f5..39f7d96 100644
--- a/src/trace_processor/slice_tracker.cc
+++ b/src/trace_processor/slice_tracker.cc
@@ -291,7 +291,14 @@
     hash.Update(slices.category()[slice_idx]);
     hash.Update(slices.name()[slice_idx]);
   }
-  return static_cast<int64_t>(hash.digest());
+
+  // For clients which don't have a 64-bit integer type (e.g. JavaScript),
+  // returning hashes with any of the top 11 bits set produces numbers that
+  // cannot be represented exactly. Those clients cannot filter on this value
+  // because it would be meaningless when passed back to us. For this reason,
+  // make sure that the hash is never larger than 2^53 - 1.
+  constexpr uint64_t kSafeBitmask = (1ull << 53) - 1;
+  return static_cast<int64_t>(hash.digest() & kSafeBitmask);
 }
 
 }  // namespace trace_processor
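A quick illustration (not part of the patch) of why 2^53 - 1 is the cutoff: it is
JavaScript's Number.MAX_SAFE_INTEGER, the largest integer an IEEE-754 double can
represent exactly, so any masked hash survives a round trip through a
double-based client:

  #include <cassert>
  #include <cstdint>

  int main() {
    constexpr uint64_t kSafeBitmask = (1ull << 53) - 1;  // 9007199254740991
    uint64_t digest = 0xfedcba9876543210ull;             // Hypothetical hash.
    uint64_t masked = digest & kSafeBitmask;
    // Exact for every value no larger than 2^53 - 1.
    assert(static_cast<uint64_t>(static_cast<double>(masked)) == masked);
    return 0;
  }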
diff --git a/src/trace_processor/sqlite/BUILD.gn b/src/trace_processor/sqlite/BUILD.gn
index 5a81353..418e517 100644
--- a/src/trace_processor/sqlite/BUILD.gn
+++ b/src/trace_processor/sqlite/BUILD.gn
@@ -14,42 +14,44 @@
 
 import("../../../gn/test.gni")
 
-source_set("sqlite") {
-  sources = [
-    "db_sqlite_table.cc",
-    "db_sqlite_table.h",
-    "query_constraints.cc",
-    "query_constraints.h",
-    "scoped_db.h",
-    "sqlite3_str_split.cc",
-    "sqlite3_str_split.h",
-    "sqlite_table.cc",
-    "sqlite_table.h",
-    "sqlite_utils.h",
-  ]
-  deps = [
-    "../../../gn:default_deps",
-    "../../../gn:sqlite",
-    "../../../include/perfetto/trace_processor",
-    "../../../protos/perfetto/trace/ftrace:zero",
-    "../../base",
-    "../db:lib",
-    "../types",
-  ]
-}
+if (enable_perfetto_trace_processor_sqlite) {
+  source_set("sqlite") {
+    sources = [
+      "db_sqlite_table.cc",
+      "db_sqlite_table.h",
+      "query_constraints.cc",
+      "query_constraints.h",
+      "scoped_db.h",
+      "sqlite3_str_split.cc",
+      "sqlite3_str_split.h",
+      "sqlite_table.cc",
+      "sqlite_table.h",
+      "sqlite_utils.h",
+    ]
+    deps = [
+      "../../../gn:default_deps",
+      "../../../gn:sqlite",
+      "../../../include/perfetto/trace_processor",
+      "../../../protos/perfetto/trace/ftrace:zero",
+      "../../base",
+      "../db:lib",
+      "../types",
+    ]
+  }
 
-perfetto_unittest_source_set("unittests") {
-  testonly = true
-  sources = [
-    "db_sqlite_table_unittest.cc",
-    "query_constraints_unittest.cc",
-    "sqlite3_str_split_unittest.cc",
-  ]
-  deps = [
-    ":sqlite",
-    "../../../gn:default_deps",
-    "../../../gn:gtest_and_gmock",
-    "../../../gn:sqlite",
-    "../../base",
-  ]
+  perfetto_unittest_source_set("unittests") {
+    testonly = true
+    sources = [
+      "db_sqlite_table_unittest.cc",
+      "query_constraints_unittest.cc",
+      "sqlite3_str_split_unittest.cc",
+    ]
+    deps = [
+      ":sqlite",
+      "../../../gn:default_deps",
+      "../../../gn:gtest_and_gmock",
+      "../../../gn:sqlite",
+      "../../base",
+    ]
+  }
 }
diff --git a/src/trace_processor/sqlite/db_sqlite_table.cc b/src/trace_processor/sqlite/db_sqlite_table.cc
index 09c113d..054f387 100644
--- a/src/trace_processor/sqlite/db_sqlite_table.cc
+++ b/src/trace_processor/sqlite/db_sqlite_table.cc
@@ -93,26 +93,31 @@
 }
 
 util::Status DbSqliteTable::Init(int, const char* const*, Schema* schema) {
+  *schema = ComputeSchema(*table_, name().c_str());
+  return util::OkStatus();
+}
+
+SqliteTable::Schema DbSqliteTable::ComputeSchema(const Table& table,
+                                                 const char* table_name) {
   std::vector<SqliteTable::Column> schema_cols;
-  for (uint32_t i = 0; i < table_->GetColumnCount(); ++i) {
-    const auto& col = table_->GetColumn(i);
+  for (uint32_t i = 0; i < table.GetColumnCount(); ++i) {
+    const auto& col = table.GetColumn(i);
     schema_cols.emplace_back(i, col.name(), col.type());
   }
+
   // TODO(lalitm): this is hardcoded to be the id column but change this to be
   // more generic in the future.
-  const auto* col = table_->GetColumnByName("id");
+  const auto* col = table.GetColumnByName("id");
   if (!col) {
     PERFETTO_FATAL(
         "id column not found in %s. Currently all db Tables need to contain an "
         "id column; this constraint will be relaxed in the future.",
-        name().c_str());
+        table_name);
   }
 
   std::vector<size_t> primary_keys;
   primary_keys.emplace_back(col->index_in_table());
-
-  *schema = Schema(std::move(schema_cols), std::move(primary_keys));
-  return util::OkStatus();
+  return Schema(std::move(schema_cols), std::move(primary_keys));
 }
 
 int DbSqliteTable::BestIndex(const QueryConstraints& qc, BestIndexInfo* info) {
@@ -252,11 +257,50 @@
 }
 
 std::unique_ptr<SqliteTable::Cursor> DbSqliteTable::CreateCursor() {
-  return std::unique_ptr<Cursor>(new Cursor(this));
+  return std::unique_ptr<Cursor>(new Cursor(this, table_));
 }
 
-DbSqliteTable::Cursor::Cursor(DbSqliteTable* table)
-    : SqliteTable::Cursor(table), initial_db_table_(table->table_) {}
+DbSqliteTable::Cursor::Cursor(SqliteTable* sqlite_table, const Table* table)
+    : SqliteTable::Cursor(sqlite_table), initial_db_table_(table) {}
+
+void DbSqliteTable::Cursor::TryCacheCreateSortedTable(
+    const QueryConstraints& qc,
+    FilterHistory history) {
+  if (history == FilterHistory::kDifferent) {
+    // Every time we get a new constraint set, reset the state of any caching
+    // structures.
+    sorted_cache_table_ = base::nullopt;
+    repeated_cache_count_ = 0;
+    return;
+  }
+
+  PERFETTO_DCHECK(history == FilterHistory::kSame);
+
+  // Only try to create the cached table once, on the Filter call where the
+  // repeat count for this constraint set first reaches |kRepeatedThreshold|.
+  constexpr uint32_t kRepeatedThreshold = 3;
+  if (repeated_cache_count_++ != kRepeatedThreshold)
+    return;
+
+  // If we have more than one constraint, we can't cache the table using
+  // this method.
+  if (qc.constraints().size() != 1)
+    return;
+
+  // If the constraint is not an equality constraint, there's little
+  // benefit to caching.
+  const auto& c = qc.constraints().front();
+  if (!sqlite_utils::IsOpEq(c.op))
+    return;
+
+  // If the column is already sorted, we don't need to cache at all.
+  uint32_t col = static_cast<uint32_t>(c.column);
+  if (initial_db_table_->GetColumn(col).IsSorted())
+    return;
+
+  // Create the cached table, sorting on the column which has the constraint.
+  sorted_cache_table_ = initial_db_table_->Sort({Order{col, false}});
+}
 
 int DbSqliteTable::Cursor::Filter(const QueryConstraints& qc,
                                   sqlite3_value** argv,
@@ -265,23 +309,9 @@
   // before the table's destructor.
   iterator_ = base::nullopt;
 
-  if (history == FilterHistory::kSame && qc.constraints().size() == 1 &&
-      sqlite_utils::IsOpEq(qc.constraints().front().op)) {
-    // If we've seen the same constraint set with a single equality constraint
-    // more than |kRepeatedThreshold| times, we assume we will see it more
-    // in the future and thus cache a table sorted on the column. That way,
-    // future equality constraints can binary search for the value instead of
-    // doing a full table scan.
-    constexpr uint32_t kRepeatedThreshold = 3;
-    if (!sorted_cache_table_ && repeated_cache_count_++ > kRepeatedThreshold) {
-      const auto& c = qc.constraints().front();
-      uint32_t col = static_cast<uint32_t>(c.column);
-      sorted_cache_table_ = initial_db_table_->Sort({Order{col, false}});
-    }
-  } else {
-    sorted_cache_table_ = base::nullopt;
-    repeated_cache_count_ = 0;
-  }
+  // Tries to create a sorted cached table which can be used to speed up
+  // filters below.
+  TryCacheCreateSortedTable(qc, history);
 
   // We reuse this vector to reduce memory allocations on nested subqueries.
   constraints_.resize(qc.constraints().size());
diff --git a/src/trace_processor/sqlite/db_sqlite_table.h b/src/trace_processor/sqlite/db_sqlite_table.h
index 1e39653..2a606ec 100644
--- a/src/trace_processor/sqlite/db_sqlite_table.h
+++ b/src/trace_processor/sqlite/db_sqlite_table.h
@@ -26,9 +26,9 @@
 // Implements the SQLite table interface for db tables.
 class DbSqliteTable : public SqliteTable {
  public:
-  class Cursor final : public SqliteTable::Cursor {
+  class Cursor : public SqliteTable::Cursor {
    public:
-    explicit Cursor(DbSqliteTable* table);
+    Cursor(SqliteTable*, const Table* table);
 
     Cursor(Cursor&&) noexcept = default;
     Cursor& operator=(Cursor&&) = default;
@@ -41,12 +41,22 @@
     int Eof() override;
     int Column(sqlite3_context*, int N) override;
 
+   protected:
+    // Sets the table this class uses as the reference for all filter
+    // operations. Should be immediately followed by a call to Filter with
+    // |FilterHistory::kDifferent|.
+    void set_table(const Table* table) { initial_db_table_ = table; }
+
    private:
     enum class Mode {
       kSingleRow,
       kTable,
     };
 
+    // Tries to create a sorted table to cache in |sorted_cache_table_| if the
+    // constraint set matches the requirements.
+    void TryCacheCreateSortedTable(const QueryConstraints&, FilterHistory);
+
     const Table* SourceTable() const {
       // Try and use the sorted cache table (if it exists) to speed up the
       // sorting. Otherwise, just use the original table.
@@ -101,6 +111,9 @@
   int ModifyConstraints(QueryConstraints*) override;
   int BestIndex(const QueryConstraints&, BestIndexInfo*) override;
 
+  static SqliteTable::Schema ComputeSchema(const Table& table,
+                                           const char* table_name);
+
   // static for testing.
   static QueryCost EstimateCost(const Table& table, const QueryConstraints& qc);
 
diff --git a/src/trace_processor/sqlite/sqlite_table.h b/src/trace_processor/sqlite/sqlite_table.h
index a5f95e1..2cb7a0f 100644
--- a/src/trace_processor/sqlite/sqlite_table.h
+++ b/src/trace_processor/sqlite/sqlite_table.h
@@ -55,7 +55,9 @@
     size_t index() const { return index_; }
     const std::string& name() const { return name_; }
     SqlValue::Type type() const { return type_; }
+
     bool hidden() const { return hidden_; }
+    void set_hidden(bool hidden) { hidden_ = hidden; }
 
    private:
     size_t index_ = 0;
@@ -137,6 +139,8 @@
     std::string ToCreateTableStmt() const;
 
     const std::vector<Column>& columns() const { return columns_; }
+    std::vector<Column>* mutable_columns() { return &columns_; }
+
     const std::vector<size_t> primary_keys() { return primary_keys_; }
 
    private:
diff --git a/src/trace_processor/sqlite_experimental_flamegraph_table.cc b/src/trace_processor/sqlite_experimental_flamegraph_table.cc
new file mode 100644
index 0000000..68dd50f
--- /dev/null
+++ b/src/trace_processor/sqlite_experimental_flamegraph_table.cc
@@ -0,0 +1,174 @@
+
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/sqlite_experimental_flamegraph_table.h"
+
+#include "src/trace_processor/importers/proto/heap_graph_tracker.h"
+#include "src/trace_processor/trace_processor_context.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+namespace {
+
+SqliteExperimentalFlamegraphTable::InputValues GetInputValues(
+    const QueryConstraints& qc,
+    sqlite3_value** argv) {
+  using T = tables::ExperimentalFlamegraphNodesTable;
+
+  const auto& cs = qc.constraints();
+
+  auto ts_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::ts) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+  auto upid_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::upid) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+  auto profile_type_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::profile_type) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+
+  auto ts_idx = static_cast<uint32_t>(
+      std::distance(cs.begin(), std::find_if(cs.begin(), cs.end(), ts_fn)));
+  auto upid_idx = static_cast<uint32_t>(
+      std::distance(cs.begin(), std::find_if(cs.begin(), cs.end(), upid_fn)));
+  auto profile_type_idx = static_cast<uint32_t>(std::distance(
+      cs.begin(), std::find_if(cs.begin(), cs.end(), profile_type_fn)));
+
+  // We should always have valid indices here because BestIndex should only
+  // allow the constraint set to be chosen when we have equality constraints
+  // on ts, upid and profile_type.
+  PERFETTO_CHECK(ts_idx < cs.size());
+  PERFETTO_CHECK(upid_idx < cs.size());
+  PERFETTO_CHECK(profile_type_idx < cs.size());
+
+  int64_t ts = sqlite3_value_int64(argv[ts_idx]);
+  UniquePid upid = static_cast<UniquePid>(sqlite3_value_int64(argv[upid_idx]));
+  std::string profile_type =
+      reinterpret_cast<const char*>(sqlite3_value_text(argv[profile_type_idx]));
+
+  return SqliteExperimentalFlamegraphTable::InputValues{ts, upid, profile_type};
+}
+
+}  // namespace
+
+SqliteExperimentalFlamegraphTable::SqliteExperimentalFlamegraphTable(
+    sqlite3*,
+    TraceProcessorContext* context)
+    : context_(context) {}
+
+SqliteExperimentalFlamegraphTable::~SqliteExperimentalFlamegraphTable() =
+    default;
+
+void SqliteExperimentalFlamegraphTable::RegisterTable(
+    sqlite3* db,
+    TraceProcessorContext* context) {
+  SqliteTable::Register<SqliteExperimentalFlamegraphTable>(
+      db, context, "experimental_flamegraph");
+}
+
+util::Status SqliteExperimentalFlamegraphTable::Init(
+    int,
+    const char* const*,
+    SqliteTable::Schema* schema) {
+  // Create an empty table for the sake of getting the schema.
+  tables::ExperimentalFlamegraphNodesTable table(nullptr, nullptr);
+  *schema = DbSqliteTable::ComputeSchema(table, name().c_str());
+
+  using T = tables::ExperimentalFlamegraphNodesTable;
+
+  // TODO(lalitm): make it so that this happens on the macro table itself.
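+  // Marking ts, upid and profile_type as hidden lets callers pass them as
+  // "arguments" using SQLite's table-valued function syntax (see
+  // test/trace_processor/heap_graph_flamegraph.sql) while keeping them out of
+  // the visible result columns.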
+  auto& cols = *schema->mutable_columns();
+  cols[static_cast<uint32_t>(T::ColumnIndex::ts)].set_hidden(true);
+  cols[static_cast<uint32_t>(T::ColumnIndex::upid)].set_hidden(true);
+  cols[static_cast<uint32_t>(T::ColumnIndex::profile_type)].set_hidden(true);
+
+  return util::OkStatus();
+}
+
+int SqliteExperimentalFlamegraphTable::BestIndex(const QueryConstraints& qc,
+                                                 BestIndexInfo*) {
+  using T = tables::ExperimentalFlamegraphNodesTable;
+
+  const auto& cs = qc.constraints();
+
+  auto ts_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::ts) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+  bool has_ts_cs = std::find_if(cs.begin(), cs.end(), ts_fn) != cs.end();
+
+  auto upid_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::upid) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+  bool has_upid_cs = std::find_if(cs.begin(), cs.end(), upid_fn) != cs.end();
+
+  auto profile_type_fn = [](const QueryConstraints::Constraint& c) {
+    return c.column == static_cast<int>(T::ColumnIndex::profile_type) &&
+           c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+  };
+  bool has_profile_type_cs =
+      std::find_if(cs.begin(), cs.end(), profile_type_fn) != cs.end();
+
+  return has_ts_cs && has_upid_cs && has_profile_type_cs ? SQLITE_OK
+                                                         : SQLITE_CONSTRAINT;
+}
+
+std::unique_ptr<SqliteTable::Cursor>
+SqliteExperimentalFlamegraphTable::CreateCursor() {
+  return std::unique_ptr<Cursor>(new Cursor(this, context_));
+}
+
+SqliteExperimentalFlamegraphTable::Cursor::Cursor(
+    SqliteTable* sqlite_table,
+    TraceProcessorContext* context)
+    : DbSqliteTable::Cursor(sqlite_table, nullptr), context_(context) {}
+
+int SqliteExperimentalFlamegraphTable::Cursor::Filter(
+    const QueryConstraints& qc,
+    sqlite3_value** argv,
+    FilterHistory) {
+  // Keep the old table alive until after the parent Filter call below. This
+  // makes sure that we don't get a use-after-free for any pointers the parent
+  // is still holding into the old table.
+  auto old_table = std::move(table_);
+
+  // Get the input column values and compute the flamegraph using them.
+  values_ = GetInputValues(qc, argv);
+
+  // TODO(fmayer): extend this to support native profile as well.
+  if (values_.profile_type == "graph") {
+    auto* tracker = HeapGraphTracker::GetOrCreate(context_);
+    table_ = tracker->BuildFlamegraph(values_.ts, values_.upid);
+  }
+
+  // table_ can be nullptr precisely when the constraints passed to us don't
+  // make sense. In that case, just report the constraint failure to SQLite.
+  if (!table_)
+    return SQLITE_CONSTRAINT;
+
+  // Set the table in the parent to the correct value and then filter.
+  DbSqliteTable::Cursor::set_table(table_.get());
+  return DbSqliteTable::Cursor::Filter(qc, argv, FilterHistory::kDifferent);
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/sqlite_experimental_flamegraph_table.h b/src/trace_processor/sqlite_experimental_flamegraph_table.h
new file mode 100644
index 0000000..e531814
--- /dev/null
+++ b/src/trace_processor/sqlite_experimental_flamegraph_table.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_SQLITE_EXPERIMENTAL_FLAMEGRAPH_TABLE_H_
+#define SRC_TRACE_PROCESSOR_SQLITE_EXPERIMENTAL_FLAMEGRAPH_TABLE_H_
+
+#include "src/trace_processor/sqlite/db_sqlite_table.h"
+
+#include "src/trace_processor/trace_storage.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class TraceProcessorContext;
+
+class SqliteExperimentalFlamegraphTable : public SqliteTable {
+ public:
+  struct InputValues {
+    int64_t ts;
+    UniquePid upid;
+    std::string profile_type;
+  };
+
+  class Cursor : public DbSqliteTable::Cursor {
+   public:
+    Cursor(SqliteTable*, TraceProcessorContext*);
+
+    int Filter(const QueryConstraints& qc,
+               sqlite3_value** argv,
+               FilterHistory) override;
+
+   private:
+    TraceProcessorContext* context_ = nullptr;
+
+    std::unique_ptr<Table> table_;
+    InputValues values_ = {};
+  };
+
+  SqliteExperimentalFlamegraphTable(sqlite3*, TraceProcessorContext*);
+  ~SqliteExperimentalFlamegraphTable() override;
+
+  static void RegisterTable(sqlite3* db, TraceProcessorContext* storage);
+
+  // SqliteTable implementation.
+  util::Status Init(int,
+                    const char* const*,
+                    SqliteTable::Schema*) override final;
+  std::unique_ptr<SqliteTable::Cursor> CreateCursor() override;
+  int BestIndex(const QueryConstraints&, BestIndexInfo*) override;
+
+ private:
+  friend class Cursor;
+
+  TraceProcessorContext* context_;
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // SRC_TRACE_PROCESSOR_SQLITE_EXPERIMENTAL_FLAMEGRAPH_TABLE_H_
diff --git a/src/trace_processor/stack_profile_tracker.cc b/src/trace_processor/stack_profile_tracker.cc
index 7331f85..63443f4 100644
--- a/src/trace_processor/stack_profile_tracker.cc
+++ b/src/trace_processor/stack_profile_tracker.cc
@@ -43,14 +43,14 @@
   string_map_.emplace(id, str.ToStdString());
 }
 
-base::Optional<int64_t> StackProfileTracker::AddMapping(
+base::Optional<MappingId> StackProfileTracker::AddMapping(
     SourceMappingId id,
     const SourceMapping& mapping,
     const InternLookup* intern_lookup) {
   std::string path;
   for (SourceStringId str_id : mapping.name_ids) {
-    auto opt_str =
-        FindString(str_id, intern_lookup, InternedStringType::kMappingPath);
+    auto opt_str = FindOrInsertString(str_id, intern_lookup,
+                                      InternedStringType::kMappingPath);
     if (!opt_str)
       break;
     path += "/" + *opt_str;
@@ -60,7 +60,7 @@
                                           InternedStringType::kBuildId);
   if (!opt_build_id) {
     context_->storage->IncrementStats(stats::stackprofile_invalid_string_id);
-    PERFETTO_DFATAL("Invalid string.");
+    PERFETTO_DLOG("Invalid string.");
     return base::nullopt;
   }
   const StringId raw_build_id = opt_build_id.value();
@@ -84,42 +84,40 @@
 
   tables::StackProfileMappingTable* mappings =
       context_->storage->mutable_stack_profile_mapping_table();
-  int64_t cur_row = -1;
+  base::Optional<MappingId> cur_id;
   auto it = mapping_idx_.find(row);
   if (it != mapping_idx_.end()) {
-    cur_row = it->second;
+    cur_id = it->second;
   } else {
-    std::vector<int64_t> db_mappings =
+    std::vector<MappingId> db_mappings =
         context_->storage->FindMappingRow(row.name, row.build_id);
-    for (const int64_t preexisting_mapping : db_mappings) {
-      PERFETTO_DCHECK(preexisting_mapping >= 0);
-      uint32_t preexisting_row_id = static_cast<uint32_t>(preexisting_mapping);
-      tables::StackProfileMappingTable::Row preexisting_row{
-          mappings->build_id()[preexisting_row_id],
-          mappings->exact_offset()[preexisting_row_id],
-          mappings->start_offset()[preexisting_row_id],
-          mappings->start()[preexisting_row_id],
-          mappings->end()[preexisting_row_id],
-          mappings->load_bias()[preexisting_row_id],
-          mappings->name()[preexisting_row_id]};
+    for (const MappingId preexisting_mapping : db_mappings) {
+      uint32_t preexisting_row = *mappings->id().IndexOf(preexisting_mapping);
+      tables::StackProfileMappingTable::Row preexisting_data{
+          mappings->build_id()[preexisting_row],
+          mappings->exact_offset()[preexisting_row],
+          mappings->start_offset()[preexisting_row],
+          mappings->start()[preexisting_row],
+          mappings->end()[preexisting_row],
+          mappings->load_bias()[preexisting_row],
+          mappings->name()[preexisting_row]};
 
-      if (row == preexisting_row) {
-        cur_row = preexisting_mapping;
+      if (row == preexisting_data) {
+        cur_id = preexisting_mapping;
       }
     }
-    if (cur_row == -1) {
+    if (!cur_id) {
       MappingId mapping_id = mappings->Insert(row);
-      uint32_t mapping_row = *mappings->id().IndexOf(mapping_id);
-      context_->storage->InsertMappingRow(row.name, row.build_id, mapping_row);
-      cur_row = mapping_row;
+      context_->storage->InsertMappingId(row.name, row.build_id, mapping_id);
+      cur_id = mapping_id;
     }
-    mapping_idx_.emplace(row, cur_row);
+    mapping_idx_.emplace(row, *cur_id);
   }
-  mappings_.emplace(id, cur_row);
-  return cur_row;
+  mapping_ids_.emplace(id, *cur_id);
+  return cur_id;
 }
 
-base::Optional<int64_t> StackProfileTracker::AddFrame(
+base::Optional<FrameId> StackProfileTracker::AddFrame(
     SourceFrameId id,
     const SourceFrame& frame,
     const InternLookup* intern_lookup) {
@@ -127,98 +125,97 @@
                                         InternedStringType::kFunctionName);
   if (!opt_str_id) {
     context_->storage->IncrementStats(stats::stackprofile_invalid_string_id);
-    PERFETTO_DFATAL("Invalid string.");
+    PERFETTO_DLOG("Invalid string.");
     return base::nullopt;
   }
   const StringId& str_id = opt_str_id.value();
 
-  auto maybe_mapping = FindMapping(frame.mapping_id, intern_lookup);
+  auto maybe_mapping = FindOrInsertMapping(frame.mapping_id, intern_lookup);
   if (!maybe_mapping) {
     context_->storage->IncrementStats(stats::stackprofile_invalid_mapping_id);
     PERFETTO_ELOG("Invalid mapping for frame %" PRIu64, id);
     return base::nullopt;
   }
-  int64_t mapping_row = *maybe_mapping;
+  MappingId mapping_id = *maybe_mapping;
 
-  tables::StackProfileFrameTable::Row row{str_id, mapping_row,
+  tables::StackProfileFrameTable::Row row{str_id, mapping_id.value,
                                           static_cast<int64_t>(frame.rel_pc)};
 
   auto* frames = context_->storage->mutable_stack_profile_frame_table();
 
-  int64_t cur_row = -1;
+  base::Optional<FrameId> cur_id;
   auto it = frame_idx_.find(row);
   if (it != frame_idx_.end()) {
-    cur_row = it->second;
+    cur_id = it->second;
   } else {
-    std::vector<int64_t> db_frames = context_->storage->FindFrameRow(
-        static_cast<size_t>(mapping_row), frame.rel_pc);
-    for (const int64_t preexisting_frame : db_frames) {
-      PERFETTO_DCHECK(preexisting_frame >= 0);
-      uint32_t preexisting_row_id = static_cast<uint32_t>(preexisting_frame);
+    std::vector<FrameId> db_frames =
+        context_->storage->FindFrameIds(mapping_id, frame.rel_pc);
+    for (const FrameId preexisting_frame : db_frames) {
+      uint32_t preexisting_row_id = preexisting_frame.value;
       tables::StackProfileFrameTable::Row preexisting_row{
           frames->name()[preexisting_row_id],
           frames->mapping()[preexisting_row_id],
           frames->rel_pc()[preexisting_row_id]};
 
       if (row == preexisting_row) {
-        cur_row = preexisting_frame;
+        cur_id = preexisting_frame;
       }
     }
-    if (cur_row == -1) {
-      auto new_id = frames->Insert(row);
-      cur_row = *frames->id().IndexOf(new_id);
-      context_->storage->InsertFrameRow(static_cast<size_t>(row.mapping),
-                                        static_cast<uint64_t>(row.rel_pc),
-                                        static_cast<uint32_t>(cur_row));
+    if (!cur_id) {
+      cur_id = frames->Insert(row);
+      context_->storage->InsertFrameRow(
+          mapping_id, static_cast<uint64_t>(row.rel_pc), *cur_id);
     }
-    frame_idx_.emplace(row, cur_row);
+    frame_idx_.emplace(row, *cur_id);
   }
-  frames_.emplace(id, cur_row);
-  return cur_row;
+  frame_ids_.emplace(id, *cur_id);
+  return cur_id;
 }
 
-base::Optional<int64_t> StackProfileTracker::AddCallstack(
+base::Optional<CallsiteId> StackProfileTracker::AddCallstack(
     SourceCallstackId id,
     const SourceCallstack& frame_ids,
     const InternLookup* intern_lookup) {
   // TODO(fmayer): This should be NULL.
-  int64_t parent_id = -1;
+  base::Optional<CallsiteId> parent_id;
   for (size_t depth = 0; depth < frame_ids.size(); ++depth) {
-    SourceFrameId frame_id = frame_ids[depth];
-    auto maybe_frame_row = FindFrame(frame_id, intern_lookup);
-    if (!maybe_frame_row) {
+    auto maybe_frame_id = FindOrInsertFrame(frame_ids[depth], intern_lookup);
+    if (!maybe_frame_id) {
       context_->storage->IncrementStats(stats::stackprofile_invalid_frame_id);
       PERFETTO_ELOG("Unknown frame in callstack; ignoring.");
       return base::nullopt;
     }
-    int64_t frame_row = *maybe_frame_row;
+    FrameId frame_id = *maybe_frame_id;
 
+    // TODO(fmayer): Store roots as having null parent_id instead of -1.
+    int64_t db_parent_id = -1;
+    if (parent_id)
+      db_parent_id = parent_id->value;
     tables::StackProfileCallsiteTable::Row row{static_cast<int64_t>(depth),
-                                               parent_id, frame_row};
+                                               db_parent_id, frame_id.value};
 
-    int64_t self_id;
+    CallsiteId self_id;
     auto callsite_it = callsite_idx_.find(row);
     if (callsite_it != callsite_idx_.end()) {
       self_id = callsite_it->second;
     } else {
       auto* callsite =
           context_->storage->mutable_stack_profile_callsite_table();
-      auto callsite_id = callsite->Insert(row);
-      self_id = callsite_id.value;
+      self_id = callsite->Insert(row);
       callsite_idx_.emplace(row, self_id);
     }
     parent_id = self_id;
   }
-  callstacks_.emplace(id, parent_id);
+  callstack_ids_.emplace(id, *parent_id);
   return parent_id;
 }
 
-int64_t StackProfileTracker::GetDatabaseFrameIdForTesting(
+FrameId StackProfileTracker::GetDatabaseFrameIdForTesting(
     SourceFrameId frame_id) {
-  auto it = frames_.find(frame_id);
-  if (it == frames_.end()) {
-    PERFETTO_DFATAL("Invalid frame.");
-    return -1;
+  auto it = frame_ids_.find(frame_id);
+  if (it == frame_ids_.end()) {
+    PERFETTO_DLOG("Invalid frame.");
+    return {};
   }
   return it->second;
 }
@@ -230,14 +227,14 @@
   if (id == 0)
     return GetEmptyStringId();
 
-  auto opt_str = FindString(id, intern_lookup, type);
+  auto opt_str = FindOrInsertString(id, intern_lookup, type);
   if (!opt_str)
     return GetEmptyStringId();
 
   return context_->storage->InternString(base::StringView(*opt_str));
 }
 
-base::Optional<std::string> StackProfileTracker::FindString(
+base::Optional<std::string> StackProfileTracker::FindOrInsertString(
     SourceStringId id,
     const InternLookup* intern_lookup,
     StackProfileTracker::InternedStringType type) {
@@ -251,7 +248,7 @@
       if (!str) {
         context_->storage->IncrementStats(
             stats::stackprofile_invalid_string_id);
-        PERFETTO_DFATAL("Invalid string.");
+        PERFETTO_DLOG("Invalid string.");
         return base::nullopt;
       }
       return str->ToStdString();
@@ -262,12 +259,12 @@
   return it->second;
 }
 
-base::Optional<int64_t> StackProfileTracker::FindMapping(
+base::Optional<MappingId> StackProfileTracker::FindOrInsertMapping(
     SourceMappingId mapping_id,
     const InternLookup* intern_lookup) {
-  base::Optional<int64_t> res;
-  auto it = mappings_.find(mapping_id);
-  if (it == mappings_.end()) {
+  base::Optional<MappingId> res;
+  auto it = mapping_ids_.find(mapping_id);
+  if (it == mapping_ids_.end()) {
     if (intern_lookup) {
       auto interned_mapping = intern_lookup->GetMapping(mapping_id);
       if (interned_mapping) {
@@ -277,19 +274,19 @@
     }
     context_->storage->IncrementStats(stats::stackprofile_invalid_mapping_id);
     PERFETTO_ELOG("Unknown mapping %" PRIu64 " : %zu", mapping_id,
-                  mappings_.size());
+                  mapping_ids_.size());
     return res;
   }
   res = it->second;
   return res;
 }
 
-base::Optional<int64_t> StackProfileTracker::FindFrame(
+base::Optional<FrameId> StackProfileTracker::FindOrInsertFrame(
     SourceFrameId frame_id,
     const InternLookup* intern_lookup) {
-  base::Optional<int64_t> res;
-  auto it = frames_.find(frame_id);
-  if (it == frames_.end()) {
+  base::Optional<FrameId> res;
+  auto it = frame_ids_.find(frame_id);
+  if (it == frame_ids_.end()) {
     if (intern_lookup) {
       auto interned_frame = intern_lookup->GetFrame(frame_id);
       if (interned_frame) {
@@ -298,28 +295,28 @@
       }
     }
     context_->storage->IncrementStats(stats::stackprofile_invalid_frame_id);
-    PERFETTO_DFATAL("Unknown frame %" PRIu64 " : %zu", frame_id,
-                    frames_.size());
+    PERFETTO_DLOG("Unknown frame %" PRIu64 " : %zu", frame_id,
+                  frame_ids_.size());
     return res;
   }
   res = it->second;
   return res;
 }
 
-base::Optional<int64_t> StackProfileTracker::FindCallstack(
+base::Optional<CallsiteId> StackProfileTracker::FindOrInsertCallstack(
     SourceCallstackId callstack_id,
     const InternLookup* intern_lookup) {
-  base::Optional<int64_t> res;
-  auto it = callstacks_.find(callstack_id);
-  if (it == callstacks_.end()) {
+  base::Optional<CallsiteId> res;
+  auto it = callstack_ids_.find(callstack_id);
+  if (it == callstack_ids_.end()) {
     auto interned_callstack = intern_lookup->GetCallstack(callstack_id);
     if (interned_callstack) {
       res = AddCallstack(callstack_id, *interned_callstack, intern_lookup);
       return res;
     }
     context_->storage->IncrementStats(stats::stackprofile_invalid_callstack_id);
-    PERFETTO_DFATAL("Unknown callstack %" PRIu64 " : %zu", callstack_id,
-                    callstacks_.size());
+    PERFETTO_DLOG("Unknown callstack %" PRIu64 " : %zu", callstack_id,
+                  callstack_ids_.size());
     return res;
   }
   res = it->second;
@@ -328,9 +325,9 @@
 
 void StackProfileTracker::ClearIndices() {
   string_map_.clear();
-  mappings_.clear();
-  callstacks_.clear();
-  frames_.clear();
+  mapping_ids_.clear();
+  callstack_ids_.clear();
+  frame_ids_.clear();
 }
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/stack_profile_tracker.h b/src/trace_processor/stack_profile_tracker.h
index 7862809..575715c 100644
--- a/src/trace_processor/stack_profile_tracker.h
+++ b/src/trace_processor/stack_profile_tracker.h
@@ -39,6 +39,41 @@
 };
 
 template <>
+struct hash<std::pair<uint32_t, perfetto::trace_processor::CallsiteId>> {
+  using argument_type =
+      std::pair<uint32_t, perfetto::trace_processor::CallsiteId>;
+  using result_type = size_t;
+
+  result_type operator()(const argument_type& p) const {
+    return std::hash<uint32_t>{}(p.first) ^
+           std::hash<uint32_t>{}(p.second.value);
+  }
+};
+
+template <>
+struct hash<std::pair<uint32_t, perfetto::trace_processor::MappingId>> {
+  using argument_type =
+      std::pair<uint32_t, perfetto::trace_processor::MappingId>;
+  using result_type = size_t;
+
+  result_type operator()(const argument_type& p) const {
+    return std::hash<uint32_t>{}(p.first) ^
+           std::hash<uint32_t>{}(p.second.value);
+  }
+};
+
+template <>
+struct hash<std::pair<uint32_t, perfetto::trace_processor::FrameId>> {
+  using argument_type = std::pair<uint32_t, perfetto::trace_processor::FrameId>;
+  using result_type = size_t;
+
+  result_type operator()(const argument_type& p) const {
+    return std::hash<uint32_t>{}(p.first) ^
+           std::hash<uint32_t>{}(p.second.value);
+  }
+};
+
+template <>
 struct hash<std::vector<uint64_t>> {
   using argument_type = std::vector<uint64_t>;
   using result_type = size_t;
@@ -119,19 +154,19 @@
   ~StackProfileTracker();
 
   void AddString(SourceStringId, base::StringView);
-  base::Optional<int64_t> AddMapping(
+  base::Optional<MappingId> AddMapping(
       SourceMappingId,
       const SourceMapping&,
       const InternLookup* intern_lookup = nullptr);
-  base::Optional<int64_t> AddFrame(SourceFrameId,
+  base::Optional<FrameId> AddFrame(SourceFrameId,
                                    const SourceFrame&,
                                    const InternLookup* intern_lookup = nullptr);
-  base::Optional<int64_t> AddCallstack(
+  base::Optional<CallsiteId> AddCallstack(
       SourceCallstackId,
       const SourceCallstack&,
       const InternLookup* intern_lookup = nullptr);
 
-  int64_t GetDatabaseFrameIdForTesting(SourceFrameId);
+  FrameId GetDatabaseFrameIdForTesting(SourceFrameId);
 
   // Gets the row number of string / mapping / frame / callstack previously
   // added through AddString / AddMapping/ AddFrame / AddCallstack.
@@ -147,15 +182,18 @@
       SourceStringId,
       const InternLookup* intern_lookup,
       InternedStringType type);
-  base::Optional<std::string> FindString(SourceStringId,
-                                         const InternLookup* intern_lookup,
-                                         InternedStringType type);
-  base::Optional<int64_t> FindMapping(SourceMappingId,
-                                      const InternLookup* intern_lookup);
-  base::Optional<int64_t> FindFrame(SourceFrameId,
-                                    const InternLookup* intern_lookup);
-  base::Optional<int64_t> FindCallstack(SourceCallstackId,
-                                        const InternLookup* intern_lookup);
+  base::Optional<std::string> FindOrInsertString(
+      SourceStringId,
+      const InternLookup* intern_lookup,
+      InternedStringType type);
+  base::Optional<MappingId> FindOrInsertMapping(
+      SourceMappingId,
+      const InternLookup* intern_lookup);
+  base::Optional<FrameId> FindOrInsertFrame(SourceFrameId,
+                                            const InternLookup* intern_lookup);
+  base::Optional<CallsiteId> FindOrInsertCallstack(
+      SourceCallstackId,
+      const InternLookup* intern_lookup);
 
   // Clear indices when they're no longer needed.
   void ClearIndices();
@@ -164,16 +202,21 @@
   StringId GetEmptyStringId();
 
   std::unordered_map<SourceStringId, std::string> string_map_;
-  std::unordered_map<SourceMappingId, int64_t> mappings_;
-  std::unordered_map<SourceFrameId, int64_t> frames_;
-  std::unordered_map<SourceCallstackId, int64_t> callstacks_;
+
+  // Maps the ID of a mapping / frame / callstack in the original trace to the
+  // ID it was assigned in the respective table.
+  std::unordered_map<SourceMappingId, MappingId> mapping_ids_;
+  std::unordered_map<SourceFrameId, FrameId> frame_ids_;
+  std::unordered_map<SourceCallstackId, CallsiteId> callstack_ids_;
 
   // TODO(oysteine): Share these indices between the StackProfileTrackers,
   // since they're not sequence-specific.
-  std::unordered_map<tables::StackProfileMappingTable::Row, int64_t>
+  //
+  // Maps the content of a database row to the ID of that row.
+  std::unordered_map<tables::StackProfileMappingTable::Row, MappingId>
       mapping_idx_;
-  std::unordered_map<tables::StackProfileFrameTable::Row, int64_t> frame_idx_;
-  std::unordered_map<tables::StackProfileCallsiteTable::Row, int64_t>
+  std::unordered_map<tables::StackProfileFrameTable::Row, FrameId> frame_idx_;
+  std::unordered_map<tables::StackProfileCallsiteTable::Row, CallsiteId>
       callsite_idx_;
 
   TraceProcessorContext* const context_;
diff --git a/src/trace_processor/tables/macros_internal.h b/src/trace_processor/tables/macros_internal.h
index 71895b2..a4fbab7 100644
--- a/src/trace_processor/tables/macros_internal.h
+++ b/src/trace_processor/tables/macros_internal.h
@@ -264,6 +264,7 @@
       explicit constexpr DefinedId(uint32_t v) : value(v) {}                  \
                                                                               \
       bool operator==(const DefinedId& o) const { return o.value == value; }  \
+      bool operator<(const DefinedId& o) const { return value < o.value; }    \
                                                                               \
       uint32_t value;                                                         \
     };                                                                        \
@@ -308,6 +309,12 @@
       PERFETTO_TP_TABLE_COLUMNS(DEF, PERFETTO_TP_ROW_DEFINITION)              \
     };                                                                        \
                                                                               \
+    enum class ColumnIndex : uint32_t {                                       \
+      id,                                                                     \
+      type, /* Expands to col1, col2, ... */                                  \
+      PERFETTO_TP_ALL_COLUMNS(DEF, PERFETTO_TP_NAME_COMMA) kNumCols           \
+    };                                                                        \
+                                                                              \
     class_name(StringPool* pool, parent_class_name* parent)                   \
         : macros_internal::MacroTable(table_name, pool, parent),              \
           parent_(parent) {                                                   \
@@ -363,12 +370,6 @@
     PERFETTO_TP_ALL_COLUMNS(DEF, PERFETTO_TP_TABLE_COL_ACCESSOR)              \
                                                                               \
    private:                                                                   \
-    enum class ColumnIndex : uint32_t {                                       \
-      id,                                                                     \
-      type, /* Expands to col1, col2, ... */                                  \
-      PERFETTO_TP_ALL_COLUMNS(DEF, PERFETTO_TP_NAME_COMMA) kNumCols           \
-    };                                                                        \
-                                                                              \
     parent_class_name* parent_;                                               \
                                                                               \
     /*                                                                        \
diff --git a/src/trace_processor/tables/profiler_tables.h b/src/trace_processor/tables/profiler_tables.h
index 7cbf445..5f529eb 100644
--- a/src/trace_processor/tables/profiler_tables.h
+++ b/src/trace_processor/tables/profiler_tables.h
@@ -87,18 +87,22 @@
 
 // This will eventually go away, when we also pre-compute the cumulative
 // sizes for native heap profiles.
-#define PERFETTO_TP_HEAP_GRAPH_ALLOCATION_DEF(NAME, PARENT, C)            \
+#define PERFETTO_TP_EXPERIMENTAL_FLAMEGRAPH_NODES(NAME, PARENT, C)        \
   NAME(ExperimentalFlamegraphNodesTable, "experimental_flamegraph_nodes") \
   PERFETTO_TP_ROOT_TABLE(PARENT, C)                                       \
   C(int64_t, ts, Column::Flag::kSorted)                                   \
   C(uint32_t, upid)                                                       \
-  C(int64_t, callsite_id)                                                 \
+  C(StringPool::Id, profile_type)                                         \
+  C(uint32_t, depth)                                                      \
+  C(StringPool::Id, name)                                                 \
+  C(StringPool::Id, map_name)                                             \
   C(int64_t, count)                                                       \
   C(int64_t, cumulative_count)                                            \
   C(int64_t, size)                                                        \
-  C(int64_t, cumulative_size)
+  C(int64_t, cumulative_size)                                             \
+  C(base::Optional<uint32_t>, parent_id)
 
-PERFETTO_TP_TABLE(PERFETTO_TP_HEAP_GRAPH_ALLOCATION_DEF);
+PERFETTO_TP_TABLE(PERFETTO_TP_EXPERIMENTAL_FLAMEGRAPH_NODES);
 
 #define PERFETTO_TP_HEAP_GRAPH_OBJECT_DEF(NAME, PARENT, C)  \
   NAME(HeapGraphObjectTable, "heap_graph_object")           \
diff --git a/src/trace_processor/trace_processor_context.h b/src/trace_processor/trace_processor_context.h
index 310005f..63e0886 100644
--- a/src/trace_processor/trace_processor_context.h
+++ b/src/trace_processor/trace_processor_context.h
@@ -73,9 +73,10 @@
   // type is only available in the storage_full target. To access these fields,
   // use the GetOrCreate() method on their subclass type,
   // e.g. SyscallTracker::GetOrCreate(context).
-  std::unique_ptr<Destructible> syscall_tracker;  // SyscallTracker
-  std::unique_ptr<Destructible> sched_tracker;    // SchedEventTracker
-  std::unique_ptr<Destructible> systrace_parser;  // SystraceParser
+  std::unique_ptr<Destructible> syscall_tracker;     // SyscallTracker
+  std::unique_ptr<Destructible> sched_tracker;       // SchedEventTracker
+  std::unique_ptr<Destructible> systrace_parser;     // SystraceParser
+  std::unique_ptr<Destructible> heap_graph_tracker;  // HeapGraphTracker
 
   // This will be nullptr in the minimal build (storage_minimal target), and
   // a pointer to the instance of SystraceTraceParser class in the full build
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index 9b44058..154dd3e 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -32,6 +32,7 @@
 #include "src/trace_processor/sqlite/db_sqlite_table.h"
 #include "src/trace_processor/sqlite/sqlite3_str_split.h"
 #include "src/trace_processor/sqlite/sqlite_table.h"
+#include "src/trace_processor/sqlite_experimental_flamegraph_table.h"
 #include "src/trace_processor/sqlite_raw_table.h"
 #include "src/trace_processor/stats_table.h"
 #include "src/trace_processor/types/variadic.h"
@@ -384,11 +385,14 @@
 
   SchedSliceTable::RegisterTable(*db_, storage);
   SqlStatsTable::RegisterTable(*db_, storage);
-  SpanJoinOperatorTable::RegisterTable(*db_, storage);
-  WindowOperatorTable::RegisterTable(*db_, storage);
   StatsTable::RegisterTable(*db_, storage);
 
+  // Operator tables.
+  SpanJoinOperatorTable::RegisterTable(*db_, storage);
+  WindowOperatorTable::RegisterTable(*db_, storage);
+
   // New style tables but with some custom logic.
+  SqliteExperimentalFlamegraphTable::RegisterTable(*db_, &context_);
   SqliteRawTable::RegisterTable(*db_, context_.storage.get());
 
   // New style db-backed tables.
@@ -448,9 +452,6 @@
       *db_, &storage->heap_profile_allocation_table(),
       storage->heap_profile_allocation_table().table_name());
   DbSqliteTable::RegisterTable(
-      *db_, &storage->experimental_flamegraph_nodes_table(),
-      storage->experimental_flamegraph_nodes_table().table_name());
-  DbSqliteTable::RegisterTable(
       *db_, &storage->cpu_profile_stack_sample_table(),
       storage->cpu_profile_stack_sample_table().table_name());
   DbSqliteTable::RegisterTable(
diff --git a/src/trace_processor/trace_storage.h b/src/trace_processor/trace_storage.h
index 8b0634b..db6ac60 100644
--- a/src/trace_processor/trace_storage.h
+++ b/src/trace_processor/trace_storage.h
@@ -73,6 +73,10 @@
 
 using MappingId = tables::StackProfileMappingTable::Id;
 
+using FrameId = tables::StackProfileFrameTable::Id;
+
+using CallsiteId = tables::StackProfileCallsiteTable::Id;
+
 using MetadataId = tables::MetadataTable::Id;
 
 using RawId = tables::RawTable::Id;
@@ -558,15 +562,6 @@
     return &heap_profile_allocation_table_;
   }
 
-  const tables::ExperimentalFlamegraphNodesTable&
-  experimental_flamegraph_nodes_table() const {
-    return experimental_flamegraph_nodes_table_;
-  }
-  tables::ExperimentalFlamegraphNodesTable*
-  mutable_experimental_flamegraph_nodes_table() {
-    return &experimental_flamegraph_nodes_table_;
-  }
-
   const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
       const {
     return cpu_profile_stack_sample_table_;
@@ -611,6 +606,7 @@
   }
 
   const StringPool& string_pool() const { return string_pool_; }
+  StringPool* mutable_string_pool() { return &string_pool_; }
 
   // Number of interned strings in the pool. Includes the empty string w/ ID=0.
   size_t string_count() const { return string_pool_.size(); }
@@ -620,7 +616,8 @@
   std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;
 
   // TODO(lalitm): remove this when we have a better home.
-  std::vector<int64_t> FindMappingRow(StringId name, StringId build_id) const {
+  std::vector<MappingId> FindMappingRow(StringId name,
+                                        StringId build_id) const {
     auto it = stack_profile_mapping_index_.find(std::make_pair(name, build_id));
     if (it == stack_profile_mapping_index_.end())
       return {};
@@ -628,13 +625,14 @@
   }
 
   // TODO(lalitm): remove this when we have a better home.
-  void InsertMappingRow(StringId name, StringId build_id, uint32_t row) {
+  void InsertMappingId(StringId name, StringId build_id, MappingId row) {
     auto pair = std::make_pair(name, build_id);
     stack_profile_mapping_index_[pair].emplace_back(row);
   }
 
   // TODO(lalitm): remove this when we have a better home.
-  std::vector<int64_t> FindFrameRow(size_t mapping_row, uint64_t rel_pc) const {
+  std::vector<FrameId> FindFrameIds(MappingId mapping_row,
+                                    uint64_t rel_pc) const {
     auto it =
         stack_profile_frame_index_.find(std::make_pair(mapping_row, rel_pc));
     if (it == stack_profile_frame_index_.end())
@@ -643,7 +641,7 @@
   }
 
   // TODO(lalitm): remove this when we have a better home.
-  void InsertFrameRow(size_t mapping_row, uint64_t rel_pc, uint32_t row) {
+  void InsertFrameRow(MappingId mapping_row, uint64_t rel_pc, FrameId row) {
     auto pair = std::make_pair(mapping_row, rel_pc);
     stack_profile_frame_index_[pair].emplace_back(row);
   }
@@ -706,11 +704,11 @@
 
   // TODO(lalitm): remove this when we find a better home for this.
   using MappingKey = std::pair<StringId /* name */, StringId /* build id */>;
-  std::map<MappingKey, std::vector<int64_t>> stack_profile_mapping_index_;
+  std::map<MappingKey, std::vector<MappingId>> stack_profile_mapping_index_;
 
   // TODO(lalitm): remove this when we find a better home for this.
-  using FrameKey = std::pair<size_t /* mapping row */, uint64_t /* rel_pc */>;
-  std::map<FrameKey, std::vector<int64_t>> stack_profile_frame_index_;
+  using FrameKey = std::pair<MappingId, uint64_t /* rel_pc */>;
+  std::map<FrameKey, std::vector<FrameId>> stack_profile_frame_index_;
 
   // One entry for each unique string in the trace.
   StringPool string_pool_;
@@ -794,8 +792,6 @@
                                                                   nullptr};
   tables::HeapProfileAllocationTable heap_profile_allocation_table_{
       &string_pool_, nullptr};
-  tables::ExperimentalFlamegraphNodesTable experimental_flamegraph_nodes_table_{
-      &string_pool_, nullptr};
   tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
       &string_pool_, nullptr};
 
diff --git a/src/tracing/api_integrationtest.cc b/src/tracing/api_integrationtest.cc
index cfeba05..2d43bf3 100644
--- a/src/tracing/api_integrationtest.cc
+++ b/src/tracing/api_integrationtest.cc
@@ -24,6 +24,9 @@
 #include <thread>
 #include <vector>
 
+// We also want to test legacy trace events.
+#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
+
 #include "perfetto/tracing.h"
 #include "test/gtest_and_gmock.h"
 
@@ -1948,6 +1951,36 @@
   EXPECT_EQ(packets_found, 1 | 2 | 4 | 8);
 }
 
+TEST_F(PerfettoApiTest, LegacyTraceEvents) {
+  // TODO(skyostil): For now we just test that all variants of legacy trace
+  // points compile. Test actual functionality when implemented.
+
+  // Basic events.
+  TRACE_EVENT_BEGIN1("cat", "LegacyEvent", "arg", 123);
+  TRACE_EVENT_END2("cat", "LegacyEvent", "arg", "string", "arg2", 0.123f);
+
+  // Scoped event.
+  { TRACE_EVENT0("cat", "ScopedLegacyEvent"); }
+
+  // Event with flow (and disabled category).
+  TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("cat"), "LegacyFlowEvent",
+                         0xdadacafe, TRACE_EVENT_FLAG_FLOW_IN);
+
+  // Event with timestamp.
+  TRACE_EVENT_INSTANT_WITH_TIMESTAMP0("cat", "LegacyInstantEvent",
+                                      TRACE_EVENT_SCOPE_GLOBAL, 123456789ul);
+
+  // Event with id, thread id and timestamp (and dynamic name).
+  TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+      "cat", std::string("LegacyWithIdTidAndTimestamp").c_str(), 1, 2, 3);
+
+  // Event with id.
+  TRACE_COUNTER_ID1("cat", "LegacyCounter", 1234, 9000);
+
+  // Metadata event.
+  TRACE_EVENT_METADATA1("cat", "LegacyMetadata", "obsolete", true);
+}
+
 }  // namespace
 
 PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(MockDataSource);
diff --git a/src/tracing/event_context.cc b/src/tracing/event_context.cc
index 2126e2c..a4f1ade 100644
--- a/src/tracing/event_context.cc
+++ b/src/tracing/event_context.cc
@@ -29,6 +29,9 @@
       incremental_state_(incremental_state) {}
 
 EventContext::~EventContext() {
+  if (!trace_packet_)
+    return;
+
   // When the track event is finalized (i.e., the context is destroyed), we
   // should flush any newly seen interned data to the trace. The data has
   // earlier been written to a heap allocated protobuf message
diff --git a/test/cts/AndroidTest.xml b/test/cts/AndroidTest.xml
index 8ec9da3..aff1988 100644
--- a/test/cts/AndroidTest.xml
+++ b/test/cts/AndroidTest.xml
@@ -33,7 +33,6 @@
         <option name="append-bitness" value="true" />
     </target_preparer>
     <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
-        <option name="run-command" value="setprop persist.traced.enable 1" />
         <option name="run-command" value="setprop persist.heapprofd.enable 1" />
         <option name="run-command" value="am start -n android.perfetto.producer/.ProducerActivity" />
     </target_preparer>
diff --git a/test/trace_processor/heap_graph_flamegraph.sql b/test/trace_processor/heap_graph_flamegraph.sql
new file mode 100644
index 0000000..157095f
--- /dev/null
+++ b/test/trace_processor/heap_graph_flamegraph.sql
@@ -0,0 +1,12 @@
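+-- The positional arguments map to the hidden columns (ts, upid, profile_type)
+-- of the experimental_flamegraph table; 'graph' selects the Java heap graph
+-- flamegraph built by HeapGraphTracker.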
+SELECT
+  id,
+  depth,
+  name,
+  map_name,
+  count,
+  cumulative_count,
+  size,
+  cumulative_size,
+  parent_id
+FROM experimental_flamegraph(601908408518618, 1, 'graph')
+LIMIT 10
diff --git a/test/trace_processor/heap_graph_flamegraph_system-server-heap-graph.out b/test/trace_processor/heap_graph_flamegraph_system-server-heap-graph.out
new file mode 100644
index 0000000..102c246
--- /dev/null
+++ b/test/trace_processor/heap_graph_flamegraph_system-server-heap-graph.out
@@ -0,0 +1,11 @@
+"id","depth","name","map_name","count","cumulative_count","size","cumulative_size","parent_id"
+0,4294967295,"java.lang.Class<boolean>","JAVA",2,4,240,276,"[NULL]"
+1,0,"java.lang.Object[]","JAVA",1,1,12,12,0
+2,0,"java.lang.String","JAVA",1,1,24,24,0
+3,4294967295,"java.lang.Class<byte>","JAVA",3,4,360,384,"[NULL]"
+4,0,"java.lang.String","JAVA",1,1,24,24,3
+5,4294967295,"java.lang.Class<short>","JAVA",2,3,240,264,"[NULL]"
+6,0,"java.lang.String","JAVA",1,1,24,24,5
+7,4294967295,"java.lang.Class<char>","JAVA",2,3,240,264,"[NULL]"
+8,0,"java.lang.String","JAVA",1,1,24,24,7
+9,4294967295,"java.lang.Class<int>","JAVA",2,3,240,264,"[NULL]"
diff --git a/test/trace_processor/index b/test/trace_processor/index
index b6e3d7d..612d838 100644
--- a/test/trace_processor/index
+++ b/test/trace_processor/index
@@ -151,6 +151,7 @@
 heap_graph.textproto heap_graph_reference.sql heap_graph_reference.out
 heap_graph_interleaved.textproto heap_graph_object.sql heap_graph_interleaved_object.out
 heap_graph_interleaved.textproto heap_graph_reference.sql heap_graph_interleaved_reference.out
+../data/system-server-heap-graph.pftrace heap_graph_flamegraph.sql heap_graph_flamegraph_system-server-heap-graph.out
 
 # TrackEvent tests.
 track_event_same_tids.textproto process_tracking.sql track_event_same_tids_threads.out
@@ -175,4 +176,3 @@
 
 # Trace size
 ../data/android_sched_and_ps.pb trace_size.sql android_sched_and_ps_trace_size.out
-
diff --git a/test/trace_processor/track_event_tracks.textproto b/test/trace_processor/track_event_tracks.textproto
index 368732e..b76fa36 100644
--- a/test/trace_processor/track_event_tracks.textproto
+++ b/test/trace_processor/track_event_tracks.textproto
@@ -244,4 +244,35 @@
     name: "event3_on_t1"
     type: 3
   }
-}
\ No newline at end of file
+}
+
+# Override the track to the default descriptor track for an event with a
+# TrackEvent type. Should appear on the default descriptor track instead of
+# "t1".
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 30000
+  track_event {
+    track_uuid: 0
+    categories: "cat"
+    name: "event1_on_t1"
+    type: 3
+  }
+}
+
+# But a legacy event without TrackEvent type falls back to legacy tracks (based
+# on ThreadDescriptor / async IDs / legacy instant scopes). This instant event
+# should appear on the process track "p2".
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 31000
+  track_event {
+    track_uuid: 0
+    categories: "cat"
+    name: "event1_on_t1"
+    legacy_event {
+      phase: 73               # 'I'
+      instant_event_scope: 2  # Process scope
+    }
+  }
+}
diff --git a/test/trace_processor/track_event_tracks_slices.out b/test/trace_processor/track_event_tracks_slices.out
index 444ccb9..a031897 100644
--- a/test/trace_processor/track_event_tracks_slices.out
+++ b/test/trace_processor/track_event_tracks_slices.out
@@ -9,3 +9,5 @@
 "[NULL]","[NULL]","t3","p1",11000,0,"cat","event1_on_t3",0
 "[NULL]","p2","[NULL]","[NULL]",21000,0,"cat","event1_on_p2",0
 "[NULL]","[NULL]","t4","p2",22000,0,"cat","event1_on_t4",0
+"Default Track","[NULL]","[NULL]","[NULL]",30000,0,"cat","event1_on_t1",0
+"[NULL]","p2","[NULL]","[NULL]",31000,0,"cat","event1_on_t1",5
diff --git a/tools/gen_bazel b/tools/gen_bazel
index f8150d3..00568b2 100755
--- a/tools/gen_bazel
+++ b/tools/gen_bazel
@@ -165,7 +165,10 @@
         continue
       res += '    %s = ' % k
       if isinstance(v, basestring):
-        res += '"%s",\n' % v
+        if v.startswith('PERFETTO_CONFIG.'):
+          res += '%s,\n' % v
+        else:
+          res += '"%s",\n' % v
       elif isinstance(v, bool):
         res += '%s,\n' % v
       elif isinstance(v, list):
@@ -198,7 +201,7 @@
 
 
 # Public visibility for targets in Bazel.
-PUBLIC_VISIBILITY = ['//visibility:public']
+PUBLIC_VISIBILITY = 'PERFETTO_CONFIG.public_visibility'
 
 
 def get_bazel_label_name(gn_name):
diff --git a/tools/install-build-deps b/tools/install-build-deps
index 7dcc6f7..200a22d 100755
--- a/tools/install-build-deps
+++ b/tools/install-build-deps
@@ -146,8 +146,8 @@
     # Example traces for regression tests.
     (
         'buildtools/test_data.zip',
-        'https://storage.googleapis.com/perfetto/test-data-20191107-164334.zip',
-        '499f11fbc2b04ef7742662a26b85ef03141e24bd',
+        'https://storage.googleapis.com/perfetto/test-data-20200120-171652.zip',
+        'a99364ac93ec2bd4407c5a392f759f45fb3f22b0',
         'all',
     ),
 
diff --git a/ui/src/frontend/help_modal.ts b/ui/src/frontend/help_modal.ts
index 39ff0e4..1227d47 100644
--- a/ui/src/frontend/help_modal.ts
+++ b/ui/src/frontend/help_modal.ts
@@ -35,41 +35,39 @@
   helpModelOpen = true;
   showModal({
     title: 'Perfetto Help',
-    content:
-        m('.help',
-          m('h2', 'Navigation'),
-          m(
-              'table',
-              m(
-                  'tr',
-                  m('td', keycap('w'), '/', keycap('s')),
-                  m('td', 'Zoom in/out'),
-                  ),
-              m(
-                  'tr',
-                  m('td', keycap('a'), '/', keycap('d')),
-                  m('td', 'Pan left/right'),
-                  ),
-              ),
-          m('h2', 'Mouse Controls'),
-          m('table',
-            m('tr', m('td', 'Click'), m('td', 'Select event')),
-            m('tr', m('td', 'Ctrl + Scroll wheel'), m('td', 'Zoom in/out')),
-            m('tr', m('td', 'Click + Drag'), m('td', 'Pan left/right')),
+    content: m(
+        '.help',
+        m('h2', 'Navigation'),
+        m(
+            'table',
+            m(
+                'tr',
+                m('td', keycap('w'), '/', keycap('s')),
+                m('td', 'Zoom in/out'),
+                ),
+            m(
+                'tr',
+                m('td', keycap('a'), '/', keycap('d')),
+                m('td', 'Pan left/right'),
+                ),
+            ),
+        m('h2', 'Mouse Controls'),
+        m('table',
+          m('tr', m('td', 'Click'), m('td', 'Select event')),
+          m('tr', m('td', 'Ctrl + Scroll wheel'), m('td', 'Zoom in/out')),
+          m('tr', m('td', 'Click + Drag'), m('td', 'Select area')),
+          m('tr', m('td', 'Shift + Click + Drag'), m('td', 'Pan left/right'))),
+        m('h2', 'Other'),
+        m(
+            'table',
             m('tr',
-              m('td', 'Shift + Click + Drag'),
-              m('td', 'Select a time span'))),
-          m('h2', 'Other'),
-          m(
-              'table',
-              m('tr',
-                m('td', keycap('f'), ' (with event selected)'),
-                m('td', 'Scroll + zoom to current selection')),
-              m('tr',
-                m('td', keycap('m'), ' (with event selected)'),
-                m('td', 'Select time span of event')),
-              m('tr', m('td', keycap('?')), m('td', 'Show help')),
-              )),
+              m('td', keycap('f'), ' (with event selected)'),
+              m('td', 'Scroll + zoom to current selection')),
+            m('tr',
+              m('td', keycap('m'), ' (with event selected)'),
+              m('td', 'Select time span of event')),
+            m('tr', m('td', keycap('?')), m('td', 'Show help')),
+            )),
     buttons: [],
   }).finally(() => {
     helpModelOpen = false;
diff --git a/ui/src/frontend/pan_and_zoom_handler.ts b/ui/src/frontend/pan_and_zoom_handler.ts
index 9d2bda1..1f504b4 100644
--- a/ui/src/frontend/pan_and_zoom_handler.ts
+++ b/ui/src/frontend/pan_and_zoom_handler.ts
@@ -44,8 +44,8 @@
 const WHEEL_ZOOM_SPEED = -0.02;
 
 const EDITING_RANGE_CURSOR = 'ew-resize';
-const SHIFT_CURSOR = 'text';
-const DEFAULT_CURSOR = 'default';
+const DRAG_CURSOR = 'text';
+const PAN_CURSOR = 'move';
 
 enum Pan {
   None = 0,
@@ -94,28 +94,29 @@
   private contentOffsetX: number;
   private onPanned: (movedPx: number) => void;
   private onZoomed: (zoomPositionPx: number, zoomRatio: number) => void;
-  private shouldDrag: (currentPx: number) => boolean;
-  private onDrag:
+  private editSelection: (currentPx: number) => boolean;
+  private onSelection:
       (dragStartX: number, dragStartY: number, prevX: number, currentX: number,
        currentY: number, editing: boolean) => void;
 
   constructor(
-      {element, contentOffsetX, onPanned, onZoomed, shouldDrag, onDrag}: {
-        element: HTMLElement,
-        contentOffsetX: number,
-        onPanned: (movedPx: number) => void,
-        onZoomed: (zoomPositionPx: number, zoomRatio: number) => void,
-        shouldDrag: (currentPx: number) => boolean,
-        onDrag:
-            (dragStartX: number, dragStartY: number, prevX: number,
-             currentX: number, currentY: number, editing: boolean) => void,
-      }) {
+      {element, contentOffsetX, onPanned, onZoomed, editSelection, onSelection}:
+          {
+            element: HTMLElement,
+            contentOffsetX: number,
+            onPanned: (movedPx: number) => void,
+            onZoomed: (zoomPositionPx: number, zoomRatio: number) => void,
+            editSelection: (currentPx: number) => boolean,
+            onSelection:
+                (dragStartX: number, dragStartY: number, prevX: number,
+                 currentX: number, currentY: number, editing: boolean) => void,
+          }) {
     this.element = element;
     this.contentOffsetX = contentOffsetX;
     this.onPanned = onPanned;
     this.onZoomed = onZoomed;
-    this.shouldDrag = shouldDrag;
-    this.onDrag = onDrag;
+    this.editSelection = editSelection;
+    this.onSelection = onSelection;
 
     document.body.addEventListener('keydown', this.boundOnKeyDown);
     document.body.addEventListener('keyup', this.boundOnKeyUp);
@@ -125,16 +126,14 @@
     let prevX = -1;
     let dragStartX = -1;
     let dragStartY = -1;
-    let drag = false;
+    let edit = false;
     new DragGestureHandler(
         this.element,
         (x, y) => {
-          // If we started our drag on a time range boundary or shift is down
-          // then we are drag selecting rather than panning.
-          if (drag || this.shiftDown) {
-            this.onDrag(dragStartX, dragStartY, prevX, x, y, !this.shiftDown);
-          } else {
+          if (this.shiftDown) {
             this.onPanned(prevX - x);
+          } else {
+            this.onSelection(dragStartX, dragStartY, prevX, x, y, edit);
           }
           prevX = x;
         },
@@ -142,19 +141,18 @@
           prevX = x;
           dragStartX = x;
           dragStartY = y;
-          drag = this.shouldDrag(x);
+          edit = this.editSelection(x);
           // Set the cursor style based on where the cursor is when the drag
           // starts.
-          if (drag) {
+          if (edit) {
             this.element.style.cursor = EDITING_RANGE_CURSOR;
-          } else if (this.shiftDown) {
-            this.element.style.cursor = SHIFT_CURSOR;
+          } else if (!this.shiftDown) {
+            this.element.style.cursor = DRAG_CURSOR;
           }
         },
         () => {
           // Reset the cursor now the drag has ended.
-          this.element.style.cursor =
-              this.shiftDown ? SHIFT_CURSOR : DEFAULT_CURSOR;
+          this.element.style.cursor = this.shiftDown ? PAN_CURSOR : DRAG_CURSOR;
           dragStartX = -1;
           dragStartY = -1;
         });
@@ -213,14 +211,13 @@
     // the cursor flickering between styles if you drag fast and get too
     // far from the current time range.
     if (e.buttons === 0) {
-      if (!this.shouldDrag(this.mousePositionX)) {
-        this.element.style.cursor =
-            this.shiftDown ? SHIFT_CURSOR : DEFAULT_CURSOR;
-      } else {
+      if (this.editSelection(this.mousePositionX)) {
         this.element.style.cursor = EDITING_RANGE_CURSOR;
+      } else {
+        this.element.style.cursor = this.shiftDown ? PAN_CURSOR : DRAG_CURSOR;
       }
     }
-    if (this.shiftDown) {
+    if (!this.shiftDown) {
       const pos = this.mousePositionX - TRACK_SHELL_WIDTH;
       const ts = globals.frontendLocalState.timeScale.pxToTime(pos);
       globals.frontendLocalState.setHoveredTimestamp(ts);
@@ -283,17 +280,15 @@
     if (down === this.shiftDown) return;
     this.shiftDown = down;
     if (this.shiftDown) {
+      globals.frontendLocalState.setHoveredTimestamp(-1);
+      this.element.style.cursor = PAN_CURSOR;
+    } else {
       if (this.mousePositionX) {
-        this.element.style.cursor = SHIFT_CURSOR;
+        this.element.style.cursor = DRAG_CURSOR;
         const pos = this.mousePositionX - TRACK_SHELL_WIDTH;
         const ts = globals.frontendLocalState.timeScale.pxToTime(pos);
         globals.frontendLocalState.setHoveredTimestamp(ts);
       }
-    } else {
-      globals.frontendLocalState.setHoveredTimestamp(-1);
-      this.element.style.cursor = DEFAULT_CURSOR;
     }
-
-    globals.frontendLocalState.setShowTimeSelectPreview(this.shiftDown);
   }
 }
diff --git a/ui/src/frontend/panel_container.ts b/ui/src/frontend/panel_container.ts
index 9d2eaa5..3c2e31f 100644
--- a/ui/src/frontend/panel_container.ts
+++ b/ui/src/frontend/panel_container.ts
@@ -234,6 +234,16 @@
     const ctx = assertExists(this.ctx);
     const canvas = assertExists(ctx.canvas);
     canvas.style.height = `${this.canvasHeight}px`;
+
+    // If this is a non-scrolling canvas, the canvas and the scroll-limiter
+    // should always have the same height. Enforce that by setting it here.
+    if (!this.attrs.doesScroll) {
+      const scrollLimiter = canvas.parentElement;
+      if (scrollLimiter) {
+        scrollLimiter.style.height = `${this.canvasHeight}px`;
+      }
+    }
+
     const dpr = window.devicePixelRatio;
     // On non-MacOS if there is a solid scroll bar it can cover important
     // pixels, reduce the size of the canvas so it doesn't overlap with
diff --git a/ui/src/frontend/viewer_page.ts b/ui/src/frontend/viewer_page.ts
index 49e6d22..6dc5475 100644
--- a/ui/src/frontend/viewer_page.ts
+++ b/ui/src/frontend/viewer_page.ts
@@ -179,10 +179,10 @@
         frontendLocalState.updateVisibleTime(newSpan);
         globals.rafScheduler.scheduleRedraw();
       },
-      shouldDrag: (currentPx: number) => {
+      editSelection: (currentPx: number) => {
         return onTimeRangeBoundary(currentPx) !== null;
       },
-      onDrag: (
+      onSelection: (
           dragStartX: number,
           dragStartY: number,
           prevX: number,