Merge "Add tools/protoprofile"
diff --git a/Android.bp b/Android.bp
index 15e84a4..924cf8c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -892,6 +892,7 @@
     "test/end_to_end_integrationtest.cc",
     "test/fake_producer.cc",
     "test/task_runner_thread.cc",
+    "test/task_runner_thread_delegates.cc",
     "test/test_helper.cc",
   ],
   shared_libs: [
@@ -4181,6 +4182,7 @@
     "test/end_to_end_integrationtest.cc",
     "test/fake_producer.cc",
     "test/task_runner_thread.cc",
+    "test/task_runner_thread_delegates.cc",
     "test/test_helper.cc",
   ],
   export_include_dirs: [
@@ -4206,6 +4208,7 @@
     "src/base/android_task_runner.cc",
     "src/base/test/test_task_runner.cc",
     "test/fake_producer.cc",
+    "test/task_runner_thread_delegates.cc",
   ],
   shared_libs: [
     "libprotobuf-cpp-lite",
diff --git a/Android.bp.extras b/Android.bp.extras
index 54de0c9..a2db5ed 100644
--- a/Android.bp.extras
+++ b/Android.bp.extras
@@ -14,6 +14,7 @@
     "test/end_to_end_integrationtest.cc",
     "test/fake_producer.cc",
     "test/task_runner_thread.cc",
+    "test/task_runner_thread_delegates.cc",
     "test/test_helper.cc",
   ],
   export_include_dirs: [
@@ -39,6 +40,7 @@
     "src/base/android_task_runner.cc",
     "src/base/test/test_task_runner.cc",
     "test/fake_producer.cc",
+    "test/task_runner_thread_delegates.cc",
   ],
   shared_libs: [
     "libprotobuf-cpp-lite",
diff --git a/docs/continuous-integration.md b/docs/continuous-integration.md
index 45737a5..024db52 100644
--- a/docs/continuous-integration.md
+++ b/docs/continuous-integration.md
@@ -190,6 +190,7 @@
                           "FAILED"
                           "TIMED_OUT"
                           "CANCELLED"
+                          "INTERRUPTED"
             time_ended:   "2019-07-07T12:47:22Z"
             time_queued:  "2019-07-07T12:34:22Z"
             time_started: "2019-07-07T12:34:25Z"
diff --git a/gn/standalone/sanitizers/sanitizers.gni b/gn/standalone/sanitizers/sanitizers.gni
index 95f40d1..9b0ec00 100644
--- a/gn/standalone/sanitizers/sanitizers.gni
+++ b/gn/standalone/sanitizers/sanitizers.gni
@@ -54,8 +54,3 @@
     }
   }
 }
-
-using_sanitizer = is_asan || is_lsan || is_tsan || is_msan || is_ubsan
-assert(!using_sanitizer || is_clang, "is_*san requires is_clang=true'")
-assert(!is_msan || is_linux, "msan only supported on linux")
-assert(!is_tsan || (is_linux || is_mac), "tsan only supported on linux and mac")
diff --git a/include/perfetto/ext/base/string_writer.h b/include/perfetto/ext/base/string_writer.h
index 6bc7fad..681324f 100644
--- a/include/perfetto/ext/base/string_writer.h
+++ b/include/perfetto/ext/base/string_writer.h
@@ -17,6 +17,7 @@
 #ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_WRITER_H_
 #define INCLUDE_PERFETTO_EXT_BASE_STRING_WRITER_H_
 
+#include <inttypes.h>
 #include <math.h>
 #include <stdlib.h>
 #include <string.h>
@@ -85,11 +86,12 @@
   }
 
   // Appends a hex integer to the buffer.
-  void AppendHexInt(uint32_t value) {
+  template <typename IntType>
+  void AppendHexInt(IntType value) {
     // TODO(lalitm): trying to optimize this is premature given we almost never
     // print hex ints. Reevaluate this in the future if we do print them more.
     size_t res = static_cast<size_t>(
-        snprintf(buffer_ + pos_, size_ - pos_, "%x", value));
+        snprintf(buffer_ + pos_, size_ - pos_, "%" PRIx64, value));
     PERFETTO_DCHECK(pos_ + res <= size_);
     pos_ += res;
   }
diff --git a/infra/ci/.pylintrc b/infra/ci/.pylintrc
new file mode 100644
index 0000000..684fb10
--- /dev/null
+++ b/infra/ci/.pylintrc
@@ -0,0 +1,23 @@
+[MESSAGES CONTROL]
+
+disable=
+  bad-indentation,
+  missing-docstring,
+  import-error,
+  no-name-in-module,
+  unused-argument,
+  invalid-name,
+  too-few-public-methods,
+
+[REPORTS]
+reports=no
+
+[VARIABLES]
+dummy-variables-rgx=_$|unused_
+
+[FORMAT]
+indent-string='  '
+
+# We suppress the long-line check for lines that contain only a URL (with or
+# without quotes).
+ignore-long-lines=^\s*'?https?://\S+'?$
\ No newline at end of file
diff --git a/infra/ci/Makefile b/infra/ci/Makefile
index 92ae450..e1c8887 100644
--- a/infra/ci/Makefile
+++ b/infra/ci/Makefile
@@ -95,7 +95,7 @@
 		--zone ${ZONE} \
 		--min-num-replicas "1" \
 		--max-num-replicas "3" \
-		--cool-down-period "60" \
+		--cool-down-period "1800" \
 		--stackdriver-metric-filter "resource.type = \"global\"" \
 		--update-stackdriver-metric "custom.googleapis.com/perfetto-ci/ci_job_queue_len" \
 		--stackdriver-metric-single-instance-assignment "10"
diff --git a/infra/ci/config.py b/infra/ci/config.py
index ff659ea..795ace7 100755
--- a/infra/ci/config.py
+++ b/infra/ci/config.py
@@ -41,6 +41,7 @@
 GCS_ARTIFACTS = 'perfetto-ci-artifacts'
 
 JOB_TIMEOUT_SEC = 60 * 30
+CL_TIMEOUT_SEC = 60 * 60 * 3
 LOGS_TTL_DAYS = 15
 TRUSTED_EMAILS = '^.*@google.com$'
 
@@ -97,7 +98,6 @@
     'linux-clang-x86_64-libfuzzer': {
         'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_fuzzer=true is_asan=true',
         'PERFETTO_TEST_SCRIPT': 'test/ci/fuzzer_tests.sh',
-        'SKIP_VOTING': True,
     },
     'ui-clang-x86_64-debug': {
         'PERFETTO_TEST_GN_ARGS': 'is_debug=true',
diff --git a/infra/ci/controller/controller.py b/infra/ci/controller/controller.py
index e723bd2..8b77dee 100644
--- a/infra/ci/controller/controller.py
+++ b/infra/ci/controller/controller.py
@@ -13,21 +13,20 @@
 # limitations under the License.
 
 import logging
-import httplib2
-import json
 import re
+import time
 import urllib
-import webapp2
 
 from datetime import datetime, timedelta
-from google.appengine.api import app_identity
 from google.appengine.api import taskqueue
 
+import webapp2
+
+from common_utils import req, utc_now_iso, parse_iso_time, SCOPES
 from config import DB, GERRIT_HOST, GERRIT_PROJECT, GERRIT_POLL_SEC, PROJECT
 from config import CI_SITE, GERRIT_VOTING_ENABLED, JOB_CONFIGS, LOGS_TTL_DAYS
 from config import TRUSTED_EMAILS, GCS_ARTIFACTS, JOB_TIMEOUT_SEC
-
-from common_utils import req, utc_now_iso, parse_iso_time, SCOPES
+from config import CL_TIMEOUT_SEC
 from stackdriver_metrics import STACKDRIVER_METRICS
 
 STACKDRIVER_API = 'https://monitoring.googleapis.com/v3/projects/%s' % PROJECT
@@ -38,6 +37,7 @@
 SCOPES.append('https://www.googleapis.com/auth/monitoring')
 SCOPES.append('https://www.googleapis.com/auth/monitoring.write')
 
+last_tick = 0
 
 # ------------------------------------------------------------------------------
 # Misc utility functions
@@ -104,11 +104,17 @@
 
 
 def tick(handler):
+  global last_tick
+  now = time.time()
+  # Avoid avalanching effects due to the failsafe tick job in cron.yaml.
+  if now - last_tick < GERRIT_POLL_SEC - 1:
+    return
   taskqueue.add(url='/controller/tick', queue_name='tick',
                 countdown=GERRIT_POLL_SEC)
   defer('check_new_cls')
   defer('check_pending_cls')
   defer('update_queue_metrics')
+  last_tick = now
 
 
 def check_new_cls(handler):
@@ -219,16 +225,15 @@
   cl = handler.request.get('cl')
   patchset = handler.request.get('patchset')
   first_key = '%s-0' % cl
-  last_key = '%s-%s' % (cl, int(patchset) - 1)
-  filter = 'orderBy="$key"&startAt="%s"&endAt="%s"' % (first_key, last_key)
-  cl_objs = req('GET', '%s/cls.json?%s' % (DB, filter)) or {}
-  for cl_obj in cl_objs.itervalues():
-    for job_id, job_completed in cl_obj['jobs'].iteritems():
-      if not job_completed:
-        # This is racy: workers can complete the queued jobs while we mark them
-        # as cancelled. The result of such race is still acceptable.
-        logging.info('Cancelling job for previous patchset %s', job_id)
-        defer('cancel_job', job_id=job_id)
+  last_key = '%s-z' % cl
+  filt = 'orderBy="$key"&startAt="%s"&endAt="%s"' % (first_key, last_key)
+  cl_objs = req('GET', '%s/cls.json?%s' % (DB, filt)) or {}
+  for cl_and_ps, cl_obj in cl_objs.iteritems():
+    ps = int(cl_and_ps.split('-')[-1])
+    if cl_obj.get('time_ended') or ps >= int(patchset):
+      continue
+    logging.info('Cancelling job for previous patchset %s', cl_and_ps)
+    map(lambda x: defer('cancel_job', job_id=x), cl_obj['jobs'].keys())
 
 
 def check_pending_cls(handler):
@@ -240,54 +245,71 @@
 
 
 def check_pending_cl(handler):
-  cl_and_ps = handler.request.get('cl_and_ps')
-  jobs_obj = req('GET', '%s/cls/%s/jobs.json' % (DB, cl_and_ps))
-  # Each value in the cls/1234-56/jobs dict can be:
-  # 0: if the job is still running.
-  # 1: if the job completed successfully.
-  # -1: if the job failed or timed out.
-  if all(jobs_obj.values()):
-    logging.info('All jobs completed for CL %s', cl_and_ps)
-    defer('finish_and_vote_cl', cl_and_ps=cl_and_ps)
-
-
-def finish_and_vote_cl(handler):
-  # This function can be called twice on the same CL, in the rare case when the
+  # This function can be called twice on the same CL, e.g., in the case when the
   # Presubmit-Ready label is applied after we have finished running all the
   # jobs (we run presubmit regardless, only the voting is conditioned by PR).
   cl_and_ps = handler.request.get('cl_and_ps')
   cl_obj = req('GET', '%s/cls/%s.json' % (DB, cl_and_ps))
-  logging.info('Computing vote and message for CL %s', cl_and_ps)
+  all_jobs = cl_obj.get('jobs', {}).keys()
+  pending_jobs = []
+  for job_id in all_jobs:
+    job_status = req('GET', '%s/jobs/%s/status.json' % (DB, job_id))
+    pending_jobs += [job_id] if job_status in ('QUEUED', 'STARTED') else []
+
+  if pending_jobs:
+    # If the CL has been pending for too long, cancel all its jobs. Upon the
+    # next scan it will be deleted and optionally voted on.
+    t_queued = parse_iso_time(cl_obj['time_queued'])
+    age_sec = (datetime.utcnow() - t_queued).total_seconds()
+    if age_sec > CL_TIMEOUT_SEC:
+      logging.warning('Canceling %s, it has been pending for too long (%s sec)',
+                      cl_and_ps, int(age_sec))
+      map(lambda x: defer('cancel_job', job_id=x), pending_jobs)
+    return
+
+  logging.info('All jobs completed for CL %s', cl_and_ps)
+
+  # Remove the CL from the pending queue and update end time.
   patch_obj = {
       'cls_pending/%s' % cl_and_ps: {},  # = DELETE
-      'cls/%s/time_ended' % cl_and_ps: cl_obj.get('time_ended', utc_now_iso())
+      'cls/%s/time_ended' % cl_and_ps: cl_obj.get('time_ended', utc_now_iso()),
   }
   req('PATCH', '%s.json' % DB, body=patch_obj)
   defer('update_cl_metrics', src='cls/' + cl_and_ps)
+  map(lambda x: defer('update_job_metrics', job_id=x), all_jobs)
+  if cl_obj.get('wants_vote'):
+    defer('comment_and_vote_cl', cl_and_ps=cl_and_ps)
 
-  # Post Gerrit update.
+
+def comment_and_vote_cl(handler):
+  cl_and_ps = handler.request.get('cl_and_ps')
+  cl_obj = req('GET', '%s/cls/%s.json' % (DB, cl_and_ps))
+
   if cl_obj.get('voted'):
     logging.error('Already voted on CL %s', cl_and_ps)
     return
 
+  if not cl_obj['wants_vote'] or not GERRIT_VOTING_ENABLED:
+    logging.info('Skipping voting on CL %s', cl_and_ps)
+    return
+
   cl_vote = 1
   passed_jobs = []
   failed_jobs = []
   ui_links = []
-  for job_id, job_res in cl_obj['jobs'].iteritems():
-    job_config = JOB_CONFIGS.get(job_id.split('--')[-1], {})
+  for job_id in cl_obj['jobs'].keys():
+    job_obj = req('GET', '%s/jobs/%s.json' % (DB, job_id))
+    job_config = JOB_CONFIGS.get(job_obj['type'], {})
     if '-ui-' in job_id:
       ui_links.append('https://storage.googleapis.com/%s/%s/ui/index.html' % (
           GCS_ARTIFACTS, job_id))
-    if job_res > 0:
+    if job_obj['status'] == 'COMPLETED':
       passed_jobs.append(job_id)
     elif not job_config.get('SKIP_VOTING', False):
       cl_vote = -1
       failed_jobs.append(job_id)
-    defer('update_job_metrics', job_id=job_id)
 
-  msg = 'Perfetto CI is under development / testing.\n'
-  msg += 'Ignore failures and -1s coming from this account for now.\n'
+  msg = ''
   log_url = CI_SITE + '/#!/logs'
   if failed_jobs:
     msg += 'FAIL:\n'
@@ -296,12 +318,9 @@
     msg += 'PASS:\n'
     msg += ''.join([' %s/%s\n' % (log_url, job_id) for job_id in passed_jobs])
   if ui_links:
-    msg += 'UI:\n' + ''.join(' %s\n' % link for link in ui_links)
+    msg += 'ARTIFACTS:\n' + ''.join(' %s\n' % link for link in ui_links)
   body = {'labels': {'Code-Review': cl_vote}, 'message': msg}
-  if not GERRIT_VOTING_ENABLED or not cl_obj['wants_vote']:
-    logging.info('Skipping voting on CL %s', cl_and_ps)
-    return
-  logging.info('Posting results for CL %s' % cl_and_ps)
+  logging.info('Posting results for CL %s', cl_and_ps)
   url = 'https://%s/a/changes/%s/revisions/%s/review' % (
       GERRIT_HOST, cl_obj['change_id'], cl_obj['revision_id'])
   req('POST', url, body=body, gerrit=True)
@@ -343,7 +362,7 @@
                      'time_committed': utc_now_iso(time_committed),
                      'time_queued': utc_now_iso(),
                      'jobs': {},
-                     }}
+                    }}
   ref = 'refs/heads/' + branch
   append_jobs(patch_obj, src, ref, now)
   req('PATCH', DB + '.json', body=patch_obj)
@@ -359,18 +378,24 @@
     job = req('GET', '%s/jobs/%s.json' % (DB, job_id))
     time_started = parse_iso_time(job.get('time_started', utc_now_iso()))
     age = (datetime.now() - time_started).total_seconds()
-    if job.get('status') != 'STARTED' or age > JOB_TIMEOUT_SEC * 2:
+    if age > JOB_TIMEOUT_SEC * 2:
       defer('cancel_job', job_id=job_id)
 
 
 def cancel_job(handler):
+  '''Cancels a job if not completed or failed.
+
+  This function is racy: workers can complete the queued jobs while we mark them
+  as cancelled. The result of such a race is still acceptable.'''
   job_id = handler.request.get('job_id')
+  status = req('GET', '%s/jobs/%s/status.json' % (DB, job_id))
   patch_obj = {
-    'jobs_running/%s' % job_id: {},  # = DELETE,
-    'jobs_queued/%s' % job_id: {},  # = DELETE,
-    'jobs/%s/status': 'CANCELLED',
-    'jobs/%s/time_ended': utc_now_iso(),
+      'jobs_running/%s' % job_id: {},  # = DELETE,
+      'jobs_queued/%s' % job_id: {},  # = DELETE,
   }
+  if status in ('QUEUED', 'STARTED'):
+    patch_obj['jobs/%s/status' % job_id] = 'CANCELLED'
+    patch_obj['jobs/%s/time_ended' % job_id] = utc_now_iso()
   req('PATCH', DB + '.json', body=patch_obj)
 
 
@@ -430,7 +455,7 @@
       'check_pending_cl': check_pending_cl,
       'check_new_cls': check_new_cls,
       'check_new_cl': check_new_cl,
-      'finish_and_vote_cl': finish_and_vote_cl,
+      'comment_and_vote_cl': comment_and_vote_cl,
       'cancel_older_jobs': cancel_older_jobs,
       'queue_postsubmit_jobs': queue_postsubmit_jobs,
       'update_job_metrics': update_job_metrics,
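
Note on the controller changes above: check_pending_cl now derives each job's state from /jobs/<id>/status instead of the old per-CL 0/1/-1 map, enforces the new CL_TIMEOUT_SEC budget, and only defers to comment_and_vote_cl when the CL actually wants a vote (the last_tick throttle in tick() similarly guards against the failsafe cron job piling up ticks). A minimal, self-contained sketch of that decision logic; the classify_cl helper and the example job id are hypothetical, not part of the patch:

```python
from datetime import datetime

CL_TIMEOUT_SEC = 60 * 60 * 3  # matches the new constant in config.py

def classify_cl(job_statuses, time_queued, now, wants_vote):
    """Returns the action check_pending_cl would take for one CL."""
    pending = [j for j, s in job_statuses.items() if s in ('QUEUED', 'STARTED')]
    if pending:
        age_sec = (now - time_queued).total_seconds()
        if age_sec > CL_TIMEOUT_SEC:
            return ('cancel', pending)  # Stuck CL: cancel its remaining jobs.
        return ('wait', [])             # Checked again on the next tick.
    # All jobs completed: close the CL; vote only if Presubmit-Ready asked for it.
    return ('vote' if wants_vote else 'close', [])

# Example: one job still queued on a CL that has been pending for four hours.
print(classify_cl({'123-1--linux-clang-x86_64-debug': 'QUEUED'},
                  datetime(2019, 7, 7, 8, 0), datetime(2019, 7, 7, 12, 0), True))
# -> ('cancel', ['123-1--linux-clang-x86_64-debug'])
```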
diff --git a/infra/ci/controller/queue.yaml b/infra/ci/controller/queue.yaml
index f55082a..1c5e53e 100644
--- a/infra/ci/controller/queue.yaml
+++ b/infra/ci/controller/queue.yaml
@@ -19,8 +19,10 @@
   retry_parameters:
     task_retry_limit: 1
 - name: deferred-jobs
-  rate: 10/s
+  rate: 50/s
   bucket_size: 10
   max_concurrent_requests: 10
   retry_parameters:
-    task_retry_limit: 3
\ No newline at end of file
+    task_retry_limit: 3
+    min_backoff_seconds: 5
+    max_backoff_seconds: 30
\ No newline at end of file
diff --git a/infra/ci/frontend/static/script.js b/infra/ci/frontend/static/script.js
index 1e68971..6a05246 100644
--- a/infra/ci/frontend/static/script.js
+++ b/infra/ci/frontend/static/script.js
@@ -337,6 +337,7 @@
     'QUEUED': 'schedule',
     'FAILED': 'bug_report',
     'CANCELLED': 'cancel',
+    'INTERRUPTED': 'cancel',
     'TIMED_OUT': 'notification_important',
   };
   const icon = ICON_MAP[jobStatus] || 'clear';
diff --git a/infra/ci/frontend/static/style.css b/infra/ci/frontend/static/style.css
index 8bdd90c..5ce48b2 100644
--- a/infra/ci/frontend/static/style.css
+++ b/infra/ci/frontend/static/style.css
@@ -266,6 +266,10 @@
     color: #ffa000;
 }
 
+.main-table tbody td.job .INTERRUPTED {
+    color: #ffa000;
+}
+
 .main-table tbody td.job .TIMED_OUT {
     color: #db4545;
 }
diff --git a/infra/ci/sandbox/Dockerfile b/infra/ci/sandbox/Dockerfile
index ef62b21..c53169a 100644
--- a/infra/ci/sandbox/Dockerfile
+++ b/infra/ci/sandbox/Dockerfile
@@ -36,6 +36,6 @@
 COPY init.sh /ci/init.sh
 
 VOLUME [ "/ci/cache", "/ci/ramdisk", "/ci/artifacts" ]
-ENTRYPOINT [ "tini", "--" ]
+ENTRYPOINT [ "tini", "-g", "--" ]
 CMD [ "bash", "/ci/init.sh" ]
 
diff --git a/infra/ci/worker/gce-startup-script.sh b/infra/ci/worker/gce-startup-script.sh
index 75fed8a..7a3081d 100644
--- a/infra/ci/worker/gce-startup-script.sh
+++ b/infra/ci/worker/gce-startup-script.sh
@@ -47,3 +47,32 @@
   --log-driver gcplogs \
   eu.gcr.io/perfetto-ci/worker
 done
+
+
+# Register a systemd service to stop worker containers gracefully on shutdown.
+cat > /etc/systemd/system/graceful_shutdown.sh <<EOF
+#!/bin/sh
+logger 'Shutting down worker containers'
+docker ps -q  -f 'name=worker-\d+$' | xargs docker stop -t 30
+exit 0
+EOF
+
+chmod 755 /etc/systemd/system/graceful_shutdown.sh
+
+# This service causes graceful_shutdown.sh to be invoked before stopping
+# docker, hence before tearing down any other container.
+cat > /etc/systemd/system/graceful_shutdown.service <<EOF
+[Unit]
+Description=Worker container lifecycle
+Wants=gcr-online.target docker.service
+After=gcr-online.target docker.service
+Requires=docker.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStop=/etc/systemd/system/graceful_shutdown.sh
+EOF
+
+systemctl daemon-reload
+systemctl start graceful_shutdown.service
\ No newline at end of file
diff --git a/infra/ci/worker/run_job.py b/infra/ci/worker/run_job.py
index 790084f..804420c 100755
--- a/infra/ci/worker/run_job.py
+++ b/infra/ci/worker/run_job.py
@@ -94,7 +94,7 @@
   # termination of the worker container, which dispatches a SIGTERM on stop.
   def sig_handler(sig, _):
     logging.warn('Job runner got signal %s, terminating job %s', sig, job_id)
-    subprocess.call(['sudo', 'docker', 'rm', '-f', container])
+    subprocess.call(['sudo', 'docker', 'kill', container])
     os._exit(1)  # sys.exit throws a SystemExit exception, _exit really exits.
   signal.signal(signal.SIGTERM, sig_handler)
 
@@ -103,8 +103,8 @@
 
   # SYS_PTRACE is required for gtest death tests and LSan.
   cmd = ['sudo', 'docker', 'run', '--name', container, '--hostname', container,
-         '--cap-add', 'SYS_PTRACE', '--rm', '--tmpfs', '/ramdisk:exec',
-         '--env', 'PERFETTO_TEST_JOB=%s' % job_id]
+         '--cap-add', 'SYS_PTRACE', '--rm', '--tmpfs', '/ci/ramdisk:exec',
+         '--tmpfs', '/tmp:exec', '--env', 'PERFETTO_TEST_JOB=%s' % job_id]
 
   # Propagate environment variables coming from the job config.
   for kv in [kv for kv in os.environ.items() if kv[0].startswith('PERFETTO_')]:
diff --git a/infra/ci/worker/worker.py b/infra/ci/worker/worker.py
index 06cd444..78397e6 100755
--- a/infra/ci/worker/worker.py
+++ b/infra/ci/worker/worker.py
@@ -18,20 +18,16 @@
 It also handles timeouts and graceful container termination.
 '''
 
-import httplib2
 import logging
 import os
 import random
 import signal
 import socket
 import subprocess
-import sys
 import threading
 import time
 import traceback
 
-from datetime import datetime, timedelta
-from oauth2client.client import GoogleCredentials
 from config import DB, JOB_TIMEOUT_SEC
 from common_utils import req, utc_now_iso, init_logging
 from common_utils import ConcurrentModificationError, SCOPES
@@ -85,6 +81,7 @@
   # Transactionally acquire a job. Deal with races (two workers trying to
   # acquire the same job).
   job = None
+  job_id = None
   for job_id in sorted(jobs.keys(), reverse=True):
     job = try_acquire_job(job_id)
     if job is not None:
@@ -135,22 +132,21 @@
       cancelled = True
       job_runner.terminate()
 
-  status = ('CANCELLED' if cancelled else
+  status = ('INTERRUPTED' if sigterm.is_set() else
+            'CANCELLED' if cancelled else
             'TIMED_OUT' if timed_out else
             'COMPLETED' if res == 0 else
             'FAILED')
   logging.info('Job %s %s with code %s', job_id, status, res)
 
-  # Update the DB, unless the job has been cancelled. The "is not Non"
+  # Update the DB, unless the job has been cancelled. The "is not None"
   # condition deals with a very niche case, that is, avoid creating a partial
   # job entry after doing a full clear of the DB (which is super rare, happens
   # only when re-deploying the CI).
   if polled_status is not None:
-    status_num = 1 if status == 'COMPLETED' else -1
     patch = {
-        # This updates cls/1234-56/jobs/xxx-clang-arm: 1|-1
-        '%s/jobs/%s' % (job['src'], job_id): status_num,
         'jobs/%s/status' % job_id: status,
+        'jobs/%s/exit_code' % job_id: {} if res is None else res,
         'jobs/%s/time_ended' % job_id: utc_now_iso(),
         'jobs_running/%s' % job_id: {},  # = DELETE
     }
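
The worker now distinguishes a job torn down because the worker itself received SIGTERM (e.g., the VM being drained) from one cancelled through the DB; the former is reported as INTERRUPTED, the new status the docs and frontend above also learn about. A small stand-alone sketch of the precedence encoded in the conditional expression, not the worker code itself:

```python
def job_status(sigterm_set, cancelled, timed_out, returncode):
    """Mirrors the precedence in worker.py: worker-level SIGTERM wins, then an
    explicit cancellation, then a timeout, then the container's exit code."""
    if sigterm_set:
        return 'INTERRUPTED'
    if cancelled:
        return 'CANCELLED'
    if timed_out:
        return 'TIMED_OUT'
    return 'COMPLETED' if returncode == 0 else 'FAILED'

assert job_status(False, False, False, 0) == 'COMPLETED'
assert job_status(True, True, False, 1) == 'INTERRUPTED'  # SIGTERM takes priority
```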
diff --git a/protos/perfetto/trace/gpu/gpu_render_stage_event.proto b/protos/perfetto/trace/gpu/gpu_render_stage_event.proto
index c497ffa..2a85246 100644
--- a/protos/perfetto/trace/gpu/gpu_render_stage_event.proto
+++ b/protos/perfetto/trace/gpu/gpu_render_stage_event.proto
@@ -23,21 +23,18 @@
   // required. Unique ID for the event.
   optional uint64 event_id = 1;
 
-  // required. Start time of event in GPU clock.
-  optional uint64 start_time = 2;
-
   // optional. Duration in GPU clock.  If unset, this is a single time point
   // event.
-  optional uint64 duration = 3;
+  optional uint64 duration = 2;
 
   // required. ID to a hardware queue description in the specifications.
-  optional int32 hw_queue_id = 4;
+  optional int32 hw_queue_id = 3;
 
   // required. ID to a render stage description in the specifications.
-  optional int32 stage_id = 5;
+  optional int32 stage_id = 4;
 
   // required. GL context/VK device.
-  optional uint64 context = 6;
+  optional uint64 context = 5;
 
   // optional.  Additional data for the user. This may include attribs for
   // the event like resource ids, shaders etc
@@ -45,7 +42,7 @@
     optional string name = 1;
     optional string value = 2;
   }
-  repeated ExtraData extra_data = 7;
+  repeated ExtraData extra_data = 6;
 
   // The first trace packet of each session should include a Specifications
   // to enumerate all IDs that will be used.
@@ -57,9 +54,8 @@
     optional ContextSpec context_spec = 1;
 
     message Description {
-      optional int32 id = 1;
-      optional string name = 2;
-      optional string description = 3;
+      optional string name = 1;
+      optional string description = 2;
     }
 
     // Labels to categorize the hw Queue this event goes on
@@ -68,8 +64,8 @@
     // Labels to categorize render stage(binning, render, compute etc)
     repeated Description stage = 3;
   }
-  optional Specifications specifications = 8;
+  optional Specifications specifications = 7;
 
   // Extension for vendor's custom proto.
-  extensions 100 to 200;
+  extensions 100;
 }
diff --git a/protos/perfetto/trace/perfetto_trace.proto b/protos/perfetto/trace/perfetto_trace.proto
index 6c186b0..2bd1250 100644
--- a/protos/perfetto/trace/perfetto_trace.proto
+++ b/protos/perfetto/trace/perfetto_trace.proto
@@ -3544,21 +3544,18 @@
   // required. Unique ID for the event.
   optional uint64 event_id = 1;
 
-  // required. Start time of event in GPU clock.
-  optional uint64 start_time = 2;
-
   // optional. Duration in GPU clock.  If unset, this is a single time point
   // event.
-  optional uint64 duration = 3;
+  optional uint64 duration = 2;
 
   // required. ID to a hardware queue description in the specifications.
-  optional int32 hw_queue_id = 4;
+  optional int32 hw_queue_id = 3;
 
   // required. ID to a render stage description in the specifications.
-  optional int32 stage_id = 5;
+  optional int32 stage_id = 4;
 
   // required. GL context/VK device.
-  optional uint64 context = 6;
+  optional uint64 context = 5;
 
   // optional.  Additional data for the user. This may include attribs for
   // the event like resource ids, shaders etc
@@ -3566,7 +3563,7 @@
     optional string name = 1;
     optional string value = 2;
   }
-  repeated ExtraData extra_data = 7;
+  repeated ExtraData extra_data = 6;
 
   // The first trace packet of each session should include a Specifications
   // to enumerate all IDs that will be used.
@@ -3578,9 +3575,8 @@
     optional ContextSpec context_spec = 1;
 
     message Description {
-      optional int32 id = 1;
-      optional string name = 2;
-      optional string description = 3;
+      optional string name = 1;
+      optional string description = 2;
     }
 
     // Labels to categorize the hw Queue this event goes on
@@ -3589,10 +3585,10 @@
     // Labels to categorize render stage(binning, render, compute etc)
     repeated Description stage = 3;
   }
-  optional Specifications specifications = 8;
+  optional Specifications specifications = 7;
 
   // Extension for vendor's custom proto.
-  extensions 100 to 200;
+  extensions 100;
 }
 
 // End of protos/perfetto/trace/gpu/gpu_render_stage_event.proto
diff --git a/src/base/BUILD.gn b/src/base/BUILD.gn
index ac5868c..043225b 100644
--- a/src/base/BUILD.gn
+++ b/src/base/BUILD.gn
@@ -113,7 +113,6 @@
   deps = [
     ":base",
     "../../gn:default_deps",
-    "../../gn:gtest_and_gmock",
   ]
   sources = [
     "test/utils.cc",
diff --git a/src/base/test/utils.h b/src/base/test/utils.h
index 2884b57..c2125e7 100644
--- a/src/base/test/utils.h
+++ b/src/base/test/utils.h
@@ -19,10 +19,8 @@
 
 #include <string>
 
-#include <gtest/gtest.h>
 #include "perfetto/base/logging.h"
 
-#if defined(GTEST_HAS_DEATH_TEST)
 #if PERFETTO_DCHECK_IS_ON()
 
 #define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "PERFETTO_CHECK")
@@ -36,7 +34,6 @@
     GTEST_EXECUTE_STATEMENT_(statement, "PERFETTO_CHECK")
 
 #endif  // PERFETTO_DCHECK_IS_ON()
-#endif  // defined(GTEST_HAS_DEATH_TEST)
 
 namespace perfetto {
 namespace base {
diff --git a/src/base/test/vm_test_utils.cc b/src/base/test/vm_test_utils.cc
index 58a81a6..2efa845 100644
--- a/src/base/test/vm_test_utils.cc
+++ b/src/base/test/vm_test_utils.cc
@@ -25,22 +25,24 @@
 #include <string.h>
 
 #if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <vector>
+
 #include <Windows.h>
 #include <Psapi.h>
-#else
+#else  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
 #include <sys/mman.h>
 #include <sys/stat.h>
-#endif
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
 
-#include <gtest/gtest.h>
 #include "perfetto/base/build_config.h"
+#include "perfetto/base/logging.h"
 
 namespace perfetto {
 namespace base {
 namespace vm_test_utils {
 
 bool IsMapped(void* start, size_t size) {
-  EXPECT_EQ(0u, size % kPageSize);
+  PERFETTO_CHECK(size % kPageSize == 0);
 #if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
   int retries = 5;
   int number_of_entries = 4000;  // Just a guess.
@@ -61,10 +63,7 @@
     if (QueryWorkingSet(GetCurrentProcess(), &buffer[0], buffer_size))
       break;  // Success
 
-    if (GetLastError() != ERROR_BAD_LENGTH) {
-      EXPECT_EQ(true, false);
-      return false;
-    }
+    PERFETTO_CHECK(GetLastError() == ERROR_BAD_LENGTH);
 
     number_of_entries = ws_info->NumberOfEntries;
 
@@ -72,11 +71,7 @@
     // take that into account. Increasing by 10% should generally be enough.
     number_of_entries *= 1.1;
 
-    if (--retries == 0) {
-      // If we're looping, eventually fail.
-      EXPECT_EQ(true, false);
-      return false;
-    }
+    PERFETTO_CHECK(--retries > 0);  // If we're looping, eventually fail.
   }
 
   void* end = reinterpret_cast<char*>(start) + size;
@@ -112,7 +107,7 @@
   // MacOS instead returns 0 but leaves the page_states empty.
   if (res == -1 && errno == ENOMEM)
     return false;
-  EXPECT_EQ(0, res);
+  PERFETTO_CHECK(res == 0);
   for (size_t i = 0; i < num_pages; i++) {
     if (!(page_states[i] & kIncoreMask))
       return false;
diff --git a/src/trace_processor/BUILD.gn b/src/trace_processor/BUILD.gn
index aa3ccab..08ebf91 100644
--- a/src/trace_processor/BUILD.gn
+++ b/src/trace_processor/BUILD.gn
@@ -323,12 +323,6 @@
   deps = [
     ":lib",
     "../../gn:default_deps",
-    "../../gn:default_deps",
-    "../../gn:gtest_and_gmock",
-    "../../gn:sqlite",
-    "../../protos/perfetto/trace:lite",
-    "../../protos/perfetto/trace_processor:lite",
     "../base",
-    "../base:test_support",
   ]
 }
diff --git a/src/trace_processor/export_json.cc b/src/trace_processor/export_json.cc
index 641e037..5a191a3 100644
--- a/src/trace_processor/export_json.cc
+++ b/src/trace_processor/export_json.cc
@@ -35,61 +35,12 @@
 
   ~TraceFormatWriter() { WriteFooter(); }
 
-  void WriteCompleteEvent(int64_t begin_ts_us,
-                          int64_t duration_us,
-                          int64_t thread_ts_us,
-                          int64_t thread_duration_us,
-                          const char* cat,
-                          const char* name,
-                          uint32_t tid,
-                          uint32_t pid,
-                          const Json::Value& args) {
+  void WriteCommonEvent(const Json::Value& event) {
     if (!first_event_) {
       fputs(",", output_);
     }
     Json::FastWriter writer;
-    Json::Value value;
-    value["ph"] = "X";
-    value["cat"] = cat;
-    value["name"] = name;
-    value["tid"] = Json::UInt(tid);
-    value["pid"] = Json::UInt(pid);
-    value["ts"] = Json::Int64(begin_ts_us);
-    value["dur"] = Json::Int64(duration_us);
-    if (thread_ts_us > 0) {
-      value["tts"] = Json::Int64(thread_ts_us);
-      value["tdur"] = Json::Int64(thread_duration_us);
-    }
-    value["args"] = args;
-    fputs(writer.write(value).c_str(), output_);
-    first_event_ = false;
-  }
-
-  void WriteInstantEvent(int64_t begin_ts_us,
-                         int64_t thread_ts_us,
-                         const char* scope,
-                         const char* cat,
-                         const char* name,
-                         uint32_t tid,
-                         uint32_t pid,
-                         const Json::Value& args) {
-    if (!first_event_) {
-      fputs(",", output_);
-    }
-    Json::FastWriter writer;
-    Json::Value value;
-    value["ph"] = "i";
-    value["s"] = scope;
-    value["cat"] = cat;
-    value["name"] = name;
-    value["tid"] = Json::UInt(tid);
-    value["pid"] = Json::UInt(pid);
-    value["ts"] = Json::Int64(begin_ts_us);
-    if (thread_ts_us > 0) {
-      value["tts"] = Json::Int64(thread_ts_us);
-    }
-    value["args"] = args;
-    fputs(writer.write(value).c_str(), output_);
+    fputs(writer.write(event).c_str(), output_);
     first_event_ = false;
   }
 
@@ -117,58 +68,6 @@
     first_event_ = false;
   }
 
-  void WriteAsyncInstant(int64_t begin_ts_us,
-                         const char* cat,
-                         const char* name,
-                         uint32_t pid,
-                         const Json::Value& async_id,
-                         const Json::Value& args) {
-    if (!first_event_) {
-      fputs(",", output_);
-    }
-    Json::FastWriter writer;
-    Json::Value value;
-    value["pid"] = pid;
-    value["ph"] = "n";
-    value["cat"] = cat;
-    value["name"] = name;
-    value["id2"] = async_id;
-    value["ts"] = Json::Int64(begin_ts_us);
-    value["args"] = args;
-    fputs(writer.write(value).c_str(), output_);
-    first_event_ = false;
-  }
-
-  void WriteAsyncStartAndEnd(int64_t begin_ts_us,
-                             int64_t duration_us,
-                             const char* cat,
-                             const char* name,
-                             uint32_t pid,
-                             const Json::Value& async_id,
-                             const Json::Value& args) {
-    if (!first_event_) {
-      fputs(",", output_);
-    }
-    Json::FastWriter writer;
-    Json::Value value;
-    value["pid"] = pid;
-    value["ph"] = "b";
-    value["cat"] = cat;
-    value["name"] = name;
-    value["id2"] = async_id;
-    value["ts"] = Json::Int64(begin_ts_us);
-    value["args"] = args;
-    fputs(writer.write(value).c_str(), output_);
-
-    fputs(",", output_);
-    value["ph"] = "e";
-    value["ts"] = Json::Int64(begin_ts_us + duration_us);
-    value.removeMember("args");
-    fputs(writer.write(value).c_str(), output_);
-
-    first_event_ = false;
-  }
-
   void AppendTelemetryMetadataString(const char* key, const char* value) {
     metadata_["telemetry"][key].append(value);
   }
@@ -356,31 +255,39 @@
   ArgsBuilder args_builder(storage);
   const auto& slices = storage->nestable_slices();
   for (uint32_t i = 0; i < slices.slice_count(); ++i) {
-    int64_t begin_ts_us = slices.start_ns()[i] / 1000;
-    int64_t duration_us = slices.durations()[i] / 1000;
-    const char* cat = string_pool.Get(slices.categories()[i]).c_str();
-    const char* name = string_pool.Get(slices.names()[i]).c_str();
+    Json::Value event;
+    event["ts"] = Json::Int64(slices.start_ns()[i] / 1000);
+    event["cat"] = string_pool.Get(slices.categories()[i]).c_str();
+    event["name"] = string_pool.Get(slices.names()[i]).c_str();
+    event["pid"] = 0;
     Json::Value args = args_builder.GetArgs(slices.arg_set_ids()[i]);
+    if (!args.empty()) {
+      event["args"] = args;
+    }
 
     if (slices.types()[i] == RefType::kRefTrack) {  // Async event.
       uint32_t track_id = static_cast<uint32_t>(slices.refs()[i]);
       VirtualTrackScope scope = storage->virtual_tracks().scopes()[track_id];
       UniquePid upid = storage->virtual_tracks().upids()[track_id];
-      Json::Value async_id;
-      uint32_t pid = 0;
       if (scope == VirtualTrackScope::kGlobal) {
-        async_id["global"] = PrintUint64(track_id);
+        event["id2"]["global"] = PrintUint64(track_id);
       } else {
-        async_id["local"] = PrintUint64(track_id);
-        pid = storage->GetProcess(upid).pid;
+        event["id2"]["local"] = PrintUint64(track_id);
+        event["pid"] = storage->GetProcess(upid).pid;
       }
       if (slices.durations()[i] == 0) {  // Instant async event.
-        writer->WriteAsyncInstant(begin_ts_us, cat, name, pid, async_id, args);
+        event["ph"] = "n";
+        writer->WriteCommonEvent(event);
       } else {  // Async start and end.
-        writer->WriteAsyncStartAndEnd(begin_ts_us, duration_us, cat, name, pid,
-                                      async_id, args);
+        event["ph"] = "b";
+        writer->WriteCommonEvent(event);
+        event["ph"] = "e";
+        event["ts"] =
+            Json::Int64((slices.start_ns()[i] + slices.durations()[i]) / 1000);
+        event.removeMember("args");
+        writer->WriteCommonEvent(event);
       }
-    } else {                             // Sync event.
+    } else {  // Sync event.
       const auto& thread_slices = storage->thread_slices();
       int64_t thread_ts_us = 0;
       int64_t thread_duration_us = 0;
@@ -393,37 +300,45 @@
             thread_slices.thread_duration_ns()[*thread_slice_row] / 1000;
       }
       if (slices.durations()[i] == 0) {  // Instant event.
-        uint32_t pid = 0;
-        uint32_t tid = 0;
-        std::string instant_scope;
+        event["ph"] = "i";
         if (slices.types()[i] == RefType::kRefUtid) {
           UniqueTid utid = static_cast<UniqueTid>(slices.refs()[i]);
           auto thread = storage->GetThread(utid);
-          pid = thread.upid ? storage->GetProcess(*thread.upid).pid : 0;
-          tid = thread.tid;
-          instant_scope = "t";
+          if (thread.upid) {
+            event["pid"] = storage->GetProcess(*thread.upid).pid;
+          }
+          if (thread_ts_us > 0) {
+            event["tts"] = Json::Int64(thread_ts_us);
+          }
+          event["tid"] = thread.tid;
+          event["s"] = "t";
         } else if (slices.types()[i] == RefType::kRefUpid) {
           UniquePid upid = static_cast<UniquePid>(slices.refs()[i]);
-          pid = storage->GetProcess(upid).pid;
-          instant_scope = "p";
+          event["pid"] = storage->GetProcess(upid).pid;
+          event["s"] = "p";
         } else if (slices.types()[i] == RefType::kRefNoRef) {
-          instant_scope = "g";
+          event["s"] = "g";
         } else {
           return kResultWrongRefType;
         }
-        writer->WriteInstantEvent(begin_ts_us, thread_ts_us,
-                                  instant_scope.c_str(), cat, name, tid, pid,
-                                  args);
+        writer->WriteCommonEvent(event);
       } else {  // Complete event.
         if (slices.types()[i] != RefType::kRefUtid) {
           return kResultWrongRefType;
         }
+        event["ph"] = "X";
+        event["dur"] = Json::Int64(slices.durations()[i] / 1000);
         UniqueTid utid = static_cast<UniqueTid>(slices.refs()[i]);
         auto thread = storage->GetThread(utid);
-        uint32_t pid = thread.upid ? storage->GetProcess(*thread.upid).pid : 0;
-        writer->WriteCompleteEvent(begin_ts_us, duration_us, thread_ts_us,
-                                   thread_duration_us, cat, name, thread.tid,
-                                   pid, args);
+        event["tid"] = thread.tid;
+        if (thread.upid) {
+          event["pid"] = storage->GetProcess(*thread.upid).pid;
+        }
+        if (thread_ts_us > 0) {
+          event["tts"] = Json::Int64(thread_ts_us);
+          event["tdur"] = Json::Int64(thread_duration_us);
+        }
+        writer->WriteCommonEvent(event);
       }
     }
   }
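
For reference, the refactored exporter builds a single Json::Value per slice and routes everything through WriteCommonEvent(); an async slice with a non-zero duration becomes a "b"/"e" pair sharing the same id2. A sketch of the resulting event shapes, written as Python dicts with illustrative values (not taken from a real trace):

```python
# Shapes of the events the unified WriteCommonEvent() path emits
# (field values are illustrative only).
complete_event = {            # sync slice with a duration -> phase "X"
    'ph': 'X', 'cat': 'cat', 'name': 'name', 'pid': 42, 'tid': 43,
    'ts': 100, 'dur': 50, 'tts': 10, 'tdur': 5,
}
async_begin = {               # async slice with a duration -> "b" ... "e" pair
    'ph': 'b', 'cat': 'cat', 'name': 'name', 'pid': 42,
    'id2': {'local': '0x1'}, 'ts': 100, 'args': {'a': 1},
}
async_end = dict(async_begin, ph='e', ts=150)
async_end.pop('args')         # the "e" event drops args, as in the C++ code
print(complete_event, async_begin, async_end)
```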
diff --git a/src/trace_processor/export_json_unittest.cc b/src/trace_processor/export_json_unittest.cc
index db1c40e..b511616 100644
--- a/src/trace_processor/export_json_unittest.cc
+++ b/src/trace_processor/export_json_unittest.cc
@@ -125,8 +125,10 @@
 
 TEST(ExportJsonTest, WrongRefType) {
   TraceStorage storage;
-  storage.mutable_nestable_slices()->AddSlice(0, 0, 0, RefType::kRefCpuId, 0, 0,
-                                              0, 0, 0);
+  StringId cat_id = storage.InternString("cat");
+  StringId name_id = storage.InternString("name");
+  storage.mutable_nestable_slices()->AddSlice(0, 0, 0, RefType::kRefCpuId,
+                                              cat_id, name_id, 0, 0, 0);
 
   base::TempFile temp_file = base::TempFile::Create();
   FILE* output = fopen(temp_file.path().c_str(), "w+");
diff --git a/src/trace_processor/ftrace_utils.cc b/src/trace_processor/ftrace_utils.cc
index c0f0e96..081ba94 100644
--- a/src/trace_processor/ftrace_utils.cc
+++ b/src/trace_processor/ftrace_utils.cc
@@ -111,7 +111,7 @@
   }
 }
 
-TaskState::TaskStateStr TaskState::ToString() const {
+TaskState::TaskStateStr TaskState::ToString(char separator) const {
   PERFETTO_CHECK(is_valid());
 
   char buffer[32];
@@ -124,26 +124,56 @@
   } else {
     if (state_ & Atom::kInterruptibleSleep)
       buffer[pos++] = 'S';
-    if (state_ & Atom::kUninterruptibleSleep)
+    if (state_ & Atom::kUninterruptibleSleep) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'D';  // D for (D)isk sleep
-    if (state_ & Atom::kStopped)
+    }
+    if (state_ & Atom::kStopped) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'T';
-    if (state_ & Atom::kTraced)
+    }
+    if (state_ & Atom::kTraced) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 't';
-    if (state_ & Atom::kExitDead)
+    }
+    if (state_ & Atom::kExitDead) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'X';
-    if (state_ & Atom::kExitZombie)
+    }
+    if (state_ & Atom::kExitZombie) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'Z';
-    if (state_ & Atom::kTaskDead)
+    }
+    if (state_ & Atom::kTaskDead) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'x';
-    if (state_ & Atom::kWakeKill)
+    }
+    if (state_ & Atom::kWakeKill) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'K';
-    if (state_ & Atom::kWaking)
+    }
+    if (state_ & Atom::kWaking) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'W';
-    if (state_ & Atom::kParked)
+    }
+    if (state_ & Atom::kParked) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'P';
-    if (state_ & Atom::kNoLoad)
+    }
+    if (state_ & Atom::kNoLoad) {
+      if (separator && pos != 0)
+        buffer[pos++] = separator;
       buffer[pos++] = 'N';
+    }
   }
 
   if (is_kernel_preempt())
diff --git a/src/trace_processor/ftrace_utils.h b/src/trace_processor/ftrace_utils.h
index bb6ba4c..fa8d48f 100644
--- a/src/trace_processor/ftrace_utils.h
+++ b/src/trace_processor/ftrace_utils.h
@@ -68,9 +68,10 @@
   bool is_valid() const { return state_ & kValid; }
 
   // Returns the string representation of this (valid) TaskState. This array
-  // is null terminated.
+  // is null terminated. |separator| specifies if a separator should be printed
+  // between the atoms (default: \0 meaning no separator).
   // Note: This function CHECKs that |is_valid()| is true.
-  TaskStateStr ToString() const;
+  TaskStateStr ToString(char separator = '\0') const;
 
   // Returns the raw state this class was created from.
   uint16_t raw_state() const {
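
TaskState::ToString() now takes an optional separator that is inserted between the state letters. A rough Python model of the joining behaviour; the bit values below are made up for the example and do not match the kernel encoding, and the runnable/preempt handling of the real code is omitted:

```python
ATOMS = [('S', 1 << 0), ('D', 1 << 1), ('T', 1 << 2), ('t', 1 << 3),
         ('X', 1 << 4), ('Z', 1 << 5), ('x', 1 << 6), ('K', 1 << 7),
         ('W', 1 << 8), ('P', 1 << 9), ('N', 1 << 10)]

def to_string(state, separator=''):
    # Each set atom contributes one letter; letters are joined by |separator|.
    letters = [letter for letter, bit in ATOMS if state & bit]
    return separator.join(letters) if letters else 'R'  # R = runnable

print(to_string(ATOMS[1][1] | ATOMS[2][1]))       # -> 'DT'
print(to_string(ATOMS[1][1] | ATOMS[2][1], '|'))  # -> 'D|T'
```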
diff --git a/src/trace_processor/proto_trace_parser.cc b/src/trace_processor/proto_trace_parser.cc
index 727c172..9070695 100644
--- a/src/trace_processor/proto_trace_parser.cc
+++ b/src/trace_processor/proto_trace_parser.cc
@@ -35,7 +35,6 @@
 #include "src/trace_processor/heap_profile_tracker.h"
 #include "src/trace_processor/metadata.h"
 #include "src/trace_processor/process_tracker.h"
-#include "src/trace_processor/slice_tracker.h"
 #include "src/trace_processor/syscall_tracker.h"
 #include "src/trace_processor/systrace_parser.h"
 #include "src/trace_processor/trace_processor_context.h"
@@ -65,6 +64,7 @@
 #include "perfetto/trace/ftrace/systrace.pbzero.h"
 #include "perfetto/trace/ftrace/task.pbzero.h"
 #include "perfetto/trace/gpu/gpu_counter_event.pbzero.h"
+#include "perfetto/trace/gpu/gpu_render_stage_event.pbzero.h"
 #include "perfetto/trace/interned_data/interned_data.pbzero.h"
 #include "perfetto/trace/perfetto/perfetto_metatrace.pbzero.h"
 #include "perfetto/trace/power/battery_counters.pbzero.h"
@@ -79,7 +79,6 @@
 #include "perfetto/trace/trace_packet.pbzero.h"
 #include "perfetto/trace/track_event/debug_annotation.pbzero.h"
 #include "perfetto/trace/track_event/task_execution.pbzero.h"
-#include "perfetto/trace/track_event/track_event.pbzero.h"
 
 namespace perfetto {
 namespace trace_processor {
@@ -252,7 +251,38 @@
       task_file_name_args_key_id_(
           context->storage->InternString("task.posted_from.file_name")),
       task_function_name_args_key_id_(
-          context->storage->InternString("task.posted_from.function_name")) {
+          context->storage->InternString("task.posted_from.function_name")),
+      raw_legacy_event_id_(
+          context->storage->InternString("track_event.legacy_event")),
+      legacy_event_category_key_id_(
+          context->storage->InternString("legacy_event.category")),
+      legacy_event_name_key_id_(
+          context->storage->InternString("legacy_event.name")),
+      legacy_event_phase_key_id_(
+          context->storage->InternString("legacy_event.phase")),
+      legacy_event_duration_ns_key_id_(
+          context->storage->InternString("legacy_event.duration_ns")),
+      legacy_event_thread_timestamp_ns_key_id_(
+          context->storage->InternString("legacy_event.thread_timestamp_ns")),
+      legacy_event_thread_duration_ns_key_id_(
+          context->storage->InternString("legacy_event.thread_duration_ns")),
+      legacy_event_use_async_tts_key_id_(
+          context->storage->InternString("legacy_event.use_async_tts")),
+      legacy_event_global_id_key_id_(
+          context->storage->InternString("legacy_event.global_id")),
+      legacy_event_local_id_key_id_(
+          context->storage->InternString("legacy_event.local_id")),
+      legacy_event_id_scope_key_id_(
+          context->storage->InternString("legacy_event.id_scope")),
+      legacy_event_bind_id_key_id_(
+          context->storage->InternString("legacy_event.bind_id")),
+      legacy_event_bind_to_enclosing_key_id_(
+          context->storage->InternString("legacy_event.bind_to_enclosing")),
+      legacy_event_flow_direction_key_id_(
+          context->storage->InternString("legacy_event.flow_direction")),
+      flow_direction_value_in_id_(context->storage->InternString("in")),
+      flow_direction_value_out_id_(context->storage->InternString("out")),
+      flow_direction_value_inout_id_(context->storage->InternString("inout")) {
   for (const auto& name : BuildMeminfoCounterNames()) {
     meminfo_strs_id_.emplace_back(context->storage->InternString(name));
   }
@@ -400,6 +430,10 @@
     ParseGpuCounterEvent(ts, packet.gpu_counter_event());
   }
 
+  if (packet.has_gpu_render_stage_event()) {
+    ParseGpuRenderStageEvent(ts, packet.gpu_render_stage_event());
+  }
+
   if (packet.has_packages_list()) {
     ParseAndroidPackagesList(packet.packages_list());
   }
@@ -1599,10 +1633,7 @@
     }
   }
 
-  // TODO(eseckler): Handle thread instruction counts, legacy event attributes,
-  // legacy event types (async S, T, p, F phases, flow events, sample events,
-  // object events, metadata events, memory dumps, mark events, clock sync
-  // events, context events, counter events), ...
+  // TODO(eseckler): Handle thread instruction counts.
 
   auto args_callback = [this, &event, &sequence_state](
                            ArgsTracker* args_tracker, RowId row_id) {
@@ -1730,15 +1761,34 @@
     case 'b': {  // TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN
       TrackId track_id = context_->virtual_track_tracker->GetOrCreateTrack(
           {vtrack_scope, vtrack_upid, id, id_scope}, name_id);
-      slice_tracker->Begin(ts, track_id, RefType::kRefTrack, category_id,
-                           name_id, args_callback);
+      auto opt_slice_id =
+          slice_tracker->Begin(ts, track_id, RefType::kRefTrack, category_id,
+                               name_id, args_callback);
+      // For the time being, we only create vtrack slice rows if we need to
+      // store thread timestamps/counters.
+      if (legacy_event.use_async_tts() && opt_slice_id.has_value()) {
+        auto* vtrack_slices = storage->mutable_virtual_track_slices();
+        PERFETTO_DCHECK(!vtrack_slices->slice_count() ||
+                        vtrack_slices->slice_ids().back() <
+                            opt_slice_id.value());
+        vtrack_slices->AddVirtualTrackSlice(opt_slice_id.value(), tts,
+                                            kPendingThreadDuration,
+                                            /*thread_instruction_count=*/0,
+                                            /*thread_instruction_delta=*/0);
+      }
       break;
     }
     case 'e': {  // TRACE_EVENT_PHASE_NESTABLE_ASYNC_END
       TrackId track_id = context_->virtual_track_tracker->GetOrCreateTrack(
           {vtrack_scope, vtrack_upid, id, id_scope}, name_id);
-      slice_tracker->End(ts, track_id, RefType::kRefTrack, category_id, name_id,
-                         args_callback);
+      auto opt_slice_id =
+          slice_tracker->End(ts, track_id, RefType::kRefTrack, category_id,
+                             name_id, args_callback);
+      if (legacy_event.use_async_tts() && opt_slice_id.has_value()) {
+        auto* vtrack_slices = storage->mutable_virtual_track_slices();
+        vtrack_slices->UpdateThreadDurationForSliceId(opt_slice_id.value(),
+                                                      tts);
+      }
       break;
     }
     case 'n': {  // TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT
@@ -1747,14 +1797,24 @@
       int64_t duration_ns = 0;
       TrackId track_id = context_->virtual_track_tracker->GetOrCreateTrack(
           {vtrack_scope, vtrack_upid, id, id_scope}, name_id);
-      slice_tracker->Scoped(ts, track_id, RefType::kRefTrack, category_id,
-                            name_id, duration_ns, args_callback);
+      auto opt_slice_id =
+          slice_tracker->Scoped(ts, track_id, RefType::kRefTrack, category_id,
+                                name_id, duration_ns, args_callback);
+      if (legacy_event.use_async_tts() && opt_slice_id.has_value()) {
+        auto* vtrack_slices = storage->mutable_virtual_track_slices();
+        PERFETTO_DCHECK(!vtrack_slices->slice_count() ||
+                        vtrack_slices->slice_ids().back() <
+                            opt_slice_id.value());
+        vtrack_slices->AddVirtualTrackSlice(opt_slice_id.value(), tts,
+                                            duration_ns,
+                                            /*thread_instruction_count=*/0,
+                                            /*thread_instruction_delta=*/0);
+      }
       break;
     }
     case 'M': {  // TRACE_EVENT_PHASE_METADATA (process and thread names).
-      // For now, we just compare the event name and assume there's a single
-      // argument in these events with the name of the process/thread.
-      // TODO(eseckler): Use names from process/thread descriptors instead.
+      // Parse process and thread names from correspondingly named events.
+      // TODO(eseckler): Also consider names from process/thread descriptors.
       NullTermStringView event_name = storage->GetString(name_id);
       PERFETTO_DCHECK(event_name.data());
       if (strcmp(event_name.c_str(), "thread_name") == 0) {
@@ -1768,7 +1828,9 @@
           break;
         auto thread_name_id = context_->storage->InternString(thread_name);
         procs->UpdateThreadName(tid, thread_name_id);
-      } else if (strcmp(event_name.c_str(), "process_name") == 0) {
+        break;
+      }
+      if (strcmp(event_name.c_str(), "process_name") == 0) {
         auto it = event.debug_annotations();
         if (!it)
           break;
@@ -1778,12 +1840,128 @@
         if (!process_name.size)
           break;
         procs->SetProcessMetadata(pid, base::nullopt, process_name);
+        break;
       }
+      // Other metadata events are proxied via the raw table for JSON export.
+      ParseLegacyEventAsRawEvent(ts, tts, utid, category_id, name_id,
+                                 legacy_event, args_callback);
       break;
     }
+    default: {
+      // Other events are proxied via the raw table for JSON export.
+      ParseLegacyEventAsRawEvent(ts, tts, utid, category_id, name_id,
+                                 legacy_event, args_callback);
+    }
   }
 }
 
+void ProtoTraceParser::ParseLegacyEventAsRawEvent(
+    int64_t ts,
+    int64_t tts,
+    UniqueTid utid,
+    StringId category_id,
+    StringId name_id,
+    const protos::pbzero::TrackEvent::LegacyEvent::Decoder& legacy_event,
+    SliceTracker::SetArgsCallback args_callback) {
+  using LegacyEvent = protos::pbzero::TrackEvent::LegacyEvent;
+
+  RowId row_id = context_->storage->mutable_raw_events()->AddRawEvent(
+      ts, raw_legacy_event_id_, 0, utid);
+  ArgsTracker args(context_);
+  args.AddArg(row_id, legacy_event_category_key_id_,
+              legacy_event_category_key_id_, Variadic::String(category_id));
+  args.AddArg(row_id, legacy_event_name_key_id_, legacy_event_name_key_id_,
+              Variadic::String(name_id));
+  args.AddArg(row_id, legacy_event_phase_key_id_, legacy_event_phase_key_id_,
+              Variadic::Integer(legacy_event.phase()));
+
+  if (legacy_event.has_duration_us()) {
+    args.AddArg(row_id, legacy_event_duration_ns_key_id_,
+                legacy_event_duration_ns_key_id_,
+                Variadic::Integer(legacy_event.duration_us() * 1000));
+  }
+
+  if (tts) {
+    args.AddArg(row_id, legacy_event_thread_timestamp_ns_key_id_,
+                legacy_event_thread_timestamp_ns_key_id_,
+                Variadic::Integer(tts));
+    if (legacy_event.has_thread_duration_us()) {
+      args.AddArg(row_id, legacy_event_thread_duration_ns_key_id_,
+                  legacy_event_thread_duration_ns_key_id_,
+                  Variadic::Integer(legacy_event.thread_duration_us() * 1000));
+    }
+  }
+
+  // TODO(eseckler): Handle thread_instruction_count/delta.
+
+  if (legacy_event.use_async_tts()) {
+    args.AddArg(row_id, legacy_event_use_async_tts_key_id_,
+                legacy_event_use_async_tts_key_id_, Variadic::Boolean(true));
+  }
+
+  bool has_id = false;
+  if (legacy_event.has_unscoped_id()) {
+    args.AddArg(row_id, legacy_event_global_id_key_id_,
+                legacy_event_global_id_key_id_,
+                Variadic::UnsignedInteger(legacy_event.unscoped_id()));
+    has_id = true;
+  } else if (legacy_event.has_global_id()) {
+    args.AddArg(row_id, legacy_event_global_id_key_id_,
+                legacy_event_global_id_key_id_,
+                Variadic::UnsignedInteger(legacy_event.global_id()));
+    has_id = true;
+  } else if (legacy_event.has_local_id()) {
+    args.AddArg(row_id, legacy_event_local_id_key_id_,
+                legacy_event_local_id_key_id_,
+                Variadic::UnsignedInteger(legacy_event.local_id()));
+    has_id = true;
+  }
+
+  if (has_id && legacy_event.has_id_scope() && legacy_event.id_scope().size) {
+    args.AddArg(row_id, legacy_event_id_scope_key_id_,
+                legacy_event_id_scope_key_id_,
+                Variadic::String(
+                    context_->storage->InternString(legacy_event.id_scope())));
+  }
+
+  // TODO(eseckler): Parse legacy flow events into flow events table once we
+  // have a design for it.
+  if (legacy_event.has_bind_id()) {
+    args.AddArg(row_id, legacy_event_bind_id_key_id_,
+                legacy_event_bind_id_key_id_,
+                Variadic::UnsignedInteger(legacy_event.bind_id()));
+  }
+
+  if (legacy_event.bind_to_enclosing()) {
+    args.AddArg(row_id, legacy_event_bind_to_enclosing_key_id_,
+                legacy_event_bind_to_enclosing_key_id_,
+                Variadic::Boolean(true));
+  }
+
+  if (legacy_event.flow_direction()) {
+    StringId value;
+    switch (legacy_event.flow_direction()) {
+      case LegacyEvent::FLOW_IN:
+        value = flow_direction_value_in_id_;
+        break;
+      case LegacyEvent::FLOW_OUT:
+        value = flow_direction_value_out_id_;
+        break;
+      case LegacyEvent::FLOW_INOUT:
+        value = flow_direction_value_inout_id_;
+        break;
+      default:
+        PERFETTO_FATAL("Unknown flow direction: %d",
+                       legacy_event.flow_direction());
+        break;
+    }
+    args.AddArg(row_id, legacy_event_flow_direction_key_id_,
+                legacy_event_flow_direction_key_id_, Variadic::String(value));
+  }
+
+  args_callback(&args, row_id);
+}
+
 void ProtoTraceParser::ParseDebugAnnotationArgs(
     ConstBytes debug_annotation,
     ProtoIncrementalState::PacketSequenceState* sequence_state,
@@ -2098,6 +2276,47 @@
   }
 }
 
+void ProtoTraceParser::ParseGpuRenderStageEvent(int64_t ts, ConstBytes blob) {
+  protos::pbzero::GpuRenderStageEvent::Decoder event(blob.data, blob.size);
+
+  if (event.has_specifications()) {
+    protos::pbzero::GpuRenderStageEvent_Specifications::Decoder spec(
+        event.specifications().data, event.specifications().size);
+    for (auto it = spec.hw_queue(); it; ++it) {
+      protos::pbzero::GpuRenderStageEvent_Specifications_Description::Decoder
+          hw_queue(it->data(), it->size());
+      if (hw_queue.has_name()) {
+        // TODO: create vtrack for each HW queue when it's ready.
+        gpu_hw_queue_ids_.emplace_back(
+            context_->storage->InternString(hw_queue.name()));
+      }
+    }
+    for (auto it = spec.stage(); it; ++it) {
+      protos::pbzero::GpuRenderStageEvent_Specifications_Description::Decoder
+          stage(it->data(), it->size());
+      if (stage.has_name()) {
+        gpu_render_stage_ids_.emplace_back(
+            context_->storage->InternString(stage.name()));
+      }
+    }
+  }
+
+  if (event.has_event_id()) {
+    size_t stage_id = static_cast<size_t>(event.stage_id());
+    StringId stage_name;
+    if (stage_id < gpu_render_stage_ids_.size()) {
+      stage_name = gpu_render_stage_ids_[stage_id];
+    } else {
+      char buffer[64];
+      snprintf(buffer, 64, "render stage(%zu)", stage_id);
+      stage_name = context_->storage->InternString(buffer);
+    }
+    context_->slice_tracker->Scoped(
+        ts, event.hw_queue_id(), RefType::kRefGpuId, 0, /* cat */
+        stage_name, static_cast<int64_t>(event.duration()));
+  }
+}
+
 void ProtoTraceParser::ParseAndroidPackagesList(ConstBytes blob) {
   protos::pbzero::PackagesList::Decoder pkg_list(blob.data, blob.size);
   context_->storage->SetStats(stats::packages_list_has_read_errors,
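
The new ParseGpuRenderStageEvent() resolves hw queue and stage names from the Specifications packet interned earlier in the trace, falling back to a synthesized label for unknown stage ids. A compact sketch of that lookup (Python, with made-up names):

```python
def stage_name(stage_id, known_names):
    # Names come from a previously seen Specifications packet; unknown ids get
    # a synthesized "render stage(N)" label, as in ParseGpuRenderStageEvent().
    if stage_id < len(known_names):
        return known_names[stage_id]
    return 'render stage(%d)' % stage_id

names = ['BINNING', 'RENDER']  # illustrative entries from a Specifications packet
print(stage_name(1, names))    # -> 'RENDER'
print(stage_name(5, names))    # -> 'render stage(5)'
```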
diff --git a/src/trace_processor/proto_trace_parser.h b/src/trace_processor/proto_trace_parser.h
index 781d2a2..6f2f390 100644
--- a/src/trace_processor/proto_trace_parser.h
+++ b/src/trace_processor/proto_trace_parser.h
@@ -26,10 +26,13 @@
 #include "perfetto/protozero/field.h"
 #include "src/trace_processor/ftrace_descriptors.h"
 #include "src/trace_processor/proto_incremental_state.h"
+#include "src/trace_processor/slice_tracker.h"
 #include "src/trace_processor/trace_blob_view.h"
 #include "src/trace_processor/trace_parser.h"
 #include "src/trace_processor/trace_storage.h"
 
+#include "perfetto/trace/track_event/track_event.pbzero.h"
+
 namespace perfetto {
 namespace trace_processor {
 
@@ -99,6 +102,14 @@
                        int64_t tts,
                        ProtoIncrementalState::PacketSequenceState*,
                        ConstBytes);
+  void ParseLegacyEventAsRawEvent(
+      int64_t ts,
+      int64_t tts,
+      UniqueTid utid,
+      StringId category_id,
+      StringId name_id,
+      const protos::pbzero::TrackEvent::LegacyEvent::Decoder& legacy_event,
+      SliceTracker::SetArgsCallback args_callback);
   void ParseDebugAnnotationArgs(
       ConstBytes debug_annotation,
       ProtoIncrementalState::PacketSequenceState* sequence_state,
@@ -117,6 +128,7 @@
   void ParseChromeBenchmarkMetadata(ConstBytes);
   void ParseMetatraceEvent(int64_t ts, ConstBytes);
   void ParseGpuCounterEvent(int64_t ts, ConstBytes);
+  void ParseGpuRenderStageEvent(int64_t ts, ConstBytes);
   void ParseAndroidPackagesList(ConstBytes);
 
  private:
@@ -153,11 +165,30 @@
   const StringId metatrace_id_;
   const StringId task_file_name_args_key_id_;
   const StringId task_function_name_args_key_id_;
+  const StringId raw_legacy_event_id_;
+  const StringId legacy_event_category_key_id_;
+  const StringId legacy_event_name_key_id_;
+  const StringId legacy_event_phase_key_id_;
+  const StringId legacy_event_duration_ns_key_id_;
+  const StringId legacy_event_thread_timestamp_ns_key_id_;
+  const StringId legacy_event_thread_duration_ns_key_id_;
+  const StringId legacy_event_use_async_tts_key_id_;
+  const StringId legacy_event_global_id_key_id_;
+  const StringId legacy_event_local_id_key_id_;
+  const StringId legacy_event_id_scope_key_id_;
+  const StringId legacy_event_bind_id_key_id_;
+  const StringId legacy_event_bind_to_enclosing_key_id_;
+  const StringId legacy_event_flow_direction_key_id_;
+  const StringId flow_direction_value_in_id_;
+  const StringId flow_direction_value_out_id_;
+  const StringId flow_direction_value_inout_id_;
   std::vector<StringId> meminfo_strs_id_;
   std::vector<StringId> vmstat_strs_id_;
   std::vector<StringId> rss_members_;
   std::vector<StringId> power_rails_strs_id_;
   std::unordered_map<uint32_t, const StringId> gpu_counter_ids_;
+  std::vector<StringId> gpu_hw_queue_ids_;
+  std::vector<StringId> gpu_render_stage_ids_;
 
   struct FtraceMessageStrings {
     // The string id of name of the event field (e.g. sched_switch's id).
diff --git a/src/trace_processor/proto_trace_parser_unittest.cc b/src/trace_processor/proto_trace_parser_unittest.cc
index f365a84..0f72816 100644
--- a/src/trace_processor/proto_trace_parser_unittest.cc
+++ b/src/trace_processor/proto_trace_parser_unittest.cc
@@ -211,6 +211,25 @@
     ResetTraceBuffers();
   }
 
+  bool HasArg(ArgSetId set_id, StringId key_id, Variadic value) {
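+    // Searches arg set |set_id| for an entry with key |key_id| and checks
+    // that its flat_key and value match; returns true on a full match.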
+    const auto& args = storage_->args();
+    auto rows =
+        std::equal_range(args.set_ids().begin(), args.set_ids().end(), set_id);
+    for (; rows.first != rows.second; rows.first++) {
+      size_t index = static_cast<size_t>(
+          std::distance(args.set_ids().begin(), rows.first));
+      if (args.keys()[index] == key_id) {
+        EXPECT_EQ(args.flat_keys()[index], key_id);
+        EXPECT_EQ(args.arg_values()[index], value);
+        if (args.flat_keys()[index] == key_id &&
+            args.arg_values()[index] == value) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
  protected:
   std::unique_ptr<protozero::ScatteredHeapBuffer> heap_buf_;
   std::unique_ptr<protozero::ScatteredStreamWriter> stream_writer_;
@@ -891,6 +910,7 @@
     legacy_event->set_name_iid(1);
     legacy_event->set_phase('b');
     legacy_event->set_global_id(10);
+    legacy_event->set_use_async_tts(true);
 
     auto* interned_data = packet->set_interned_data();
     auto cat1 = interned_data->add_event_categories();
@@ -911,6 +931,7 @@
     legacy_event->set_name_iid(1);
     legacy_event->set_phase('e');
     legacy_event->set_global_id(10);
+    legacy_event->set_use_async_tts(true);
   }
   {
     auto* packet = trace_.add_packet();
@@ -957,7 +978,8 @@
       .WillOnce(Return(1));
   EXPECT_CALL(*storage_, InternString(base::StringView("ev1")))
       .WillOnce(Return(2));
-  EXPECT_CALL(*slice_, Begin(1010000, 0, RefType::kRefTrack, 1, 2, _));
+  EXPECT_CALL(*slice_, Begin(1010000, 0, RefType::kRefTrack, 1, 2, _))
+      .WillOnce(Return(0u));
 
   EXPECT_CALL(*storage_, InternString(base::StringView("cat2")))
       .WillOnce(Return(3));
@@ -965,7 +987,8 @@
       .WillOnce(Return(4));
   EXPECT_CALL(*slice_, Scoped(1015000, 0, RefType::kRefTrack, 3, 4, 0, _));
 
-  EXPECT_CALL(*slice_, End(1020000, 0, RefType::kRefTrack, 1, 2, _));
+  EXPECT_CALL(*slice_, End(1020000, 0, RefType::kRefTrack, 1, 2, _))
+      .WillOnce(Return(0u));
 
   EXPECT_CALL(*storage_, InternString(base::StringView("scope1")))
       .WillOnce(Return(5));
@@ -983,6 +1006,11 @@
             VirtualTrackScope::kProcess);
   EXPECT_EQ(storage_->virtual_tracks().upids()[0], 0u);
   EXPECT_EQ(storage_->virtual_tracks().upids()[1], 1u);
+
+  EXPECT_EQ(storage_->virtual_track_slices().slice_count(), 1u);
+  EXPECT_EQ(storage_->virtual_track_slices().slice_ids()[0], 0u);
+  EXPECT_EQ(storage_->virtual_track_slices().thread_timestamp_ns()[0], 2005000);
+  EXPECT_EQ(storage_->virtual_track_slices().thread_duration_ns()[0], 5000);
 }
 
 TEST_F(ProtoTraceParserTest, TrackEventWithoutIncrementalStateReset) {
@@ -1509,6 +1537,121 @@
   context_.sorter->ExtractEventsForced();
 }
 
+TEST_F(ProtoTraceParserTest, TrackEventParseLegacyEventIntoRawTable) {
+  context_.sorter.reset(new TraceSorter(
+      &context_, std::numeric_limits<int64_t>::max() /*window size*/));
+
+  {
+    auto* packet = trace_.add_packet();
+    packet->set_trusted_packet_sequence_id(1);
+    packet->set_incremental_state_cleared(true);
+    auto* thread_desc = packet->set_thread_descriptor();
+    thread_desc->set_pid(15);
+    thread_desc->set_tid(16);
+    thread_desc->set_reference_timestamp_us(1000);
+    thread_desc->set_reference_thread_time_us(2000);
+  }
+  {
+    auto* packet = trace_.add_packet();
+    packet->set_trusted_packet_sequence_id(1);
+    auto* event = packet->set_track_event();
+    event->set_timestamp_delta_us(10);   // absolute: 1010.
+    event->set_thread_time_delta_us(5);  // absolute: 2005.
+    event->add_category_iids(1);
+
+    auto* legacy_event = event->set_legacy_event();
+    legacy_event->set_name_iid(1);
+    // Represents a phase that isn't parsed into regular trace processor tables.
+    legacy_event->set_phase('?');
+    legacy_event->set_duration_us(23);
+    legacy_event->set_thread_duration_us(15);
+    legacy_event->set_global_id(99u);
+    legacy_event->set_id_scope("scope1");
+    legacy_event->set_use_async_tts(true);
+    legacy_event->set_bind_id(98);
+    legacy_event->set_bind_to_enclosing(true);
+    legacy_event->set_flow_direction(
+        protos::pbzero::TrackEvent::LegacyEvent::FLOW_INOUT);
+
+    auto* annotation1 = event->add_debug_annotations();
+    annotation1->set_name_iid(1);
+    annotation1->set_uint_value(10u);
+
+    auto* interned_data = packet->set_interned_data();
+    auto cat1 = interned_data->add_event_categories();
+    cat1->set_iid(1);
+    cat1->set_name("cat1");
+    auto ev1 = interned_data->add_legacy_event_names();
+    ev1->set_iid(1);
+    ev1->set_name("ev1");
+    auto an1 = interned_data->add_debug_annotation_names();
+    an1->set_iid(1);
+    an1->set_name("an1");
+  }
+
+  Tokenize();
+
+  EXPECT_CALL(*process_, UpdateThread(16, 15)).WillOnce(Return(1));
+
+  InSequence in_sequence;  // Below slices should be sorted by timestamp.
+
+  EXPECT_CALL(*storage_, InternString(base::StringView("cat1")))
+      .WillOnce(Return(1));
+  EXPECT_CALL(*storage_, InternString(base::StringView("ev1")))
+      .WillOnce(Return(2));
+  EXPECT_CALL(*storage_, InternString(base::StringView("scope1")))
+      .Times(2)
+      .WillRepeatedly(Return(3));
+
+  EXPECT_CALL(*storage_, InternString(base::StringView("debug.an1")))
+      .WillOnce(Return(4));
+
+  context_.sorter->ExtractEventsForced();
+
+  ::testing::Mock::VerifyAndClearExpectations(storage_);
+
+  // Verify raw_events and args contents.
+  const auto& raw_events = storage_->raw_events();
+  EXPECT_EQ(raw_events.raw_event_count(), 1u);
+  EXPECT_EQ(raw_events.timestamps()[0], 1010000);
+  EXPECT_EQ(raw_events.name_ids()[0],
+            storage_->InternString("track_event.legacy_event"));
+  EXPECT_EQ(raw_events.cpus()[0], 0u);
+  EXPECT_EQ(raw_events.utids()[0], 1u);
+  EXPECT_EQ(raw_events.arg_set_ids()[0], 1u);
+
+  EXPECT_EQ(storage_->args().args_count(), 13u);
+
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.category"),
+                     Variadic::String(1u)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.name"),
+                     Variadic::String(2u)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.phase"),
+                     Variadic::Integer('?')));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.duration_ns"),
+                     Variadic::Integer(23000)));
+  EXPECT_TRUE(HasArg(1u,
+                     storage_->InternString("legacy_event.thread_timestamp_ns"),
+                     Variadic::Integer(2005000)));
+  EXPECT_TRUE(HasArg(1u,
+                     storage_->InternString("legacy_event.thread_duration_ns"),
+                     Variadic::Integer(15000)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.use_async_tts"),
+                     Variadic::Boolean(true)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.global_id"),
+                     Variadic::UnsignedInteger(99u)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.id_scope"),
+                     Variadic::String(3u)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.bind_id"),
+                     Variadic::UnsignedInteger(98u)));
+  EXPECT_TRUE(HasArg(1u,
+                     storage_->InternString("legacy_event.bind_to_enclosing"),
+                     Variadic::Boolean(true)));
+  EXPECT_TRUE(HasArg(1u, storage_->InternString("legacy_event.flow_direction"),
+                     Variadic::String(storage_->InternString("inout"))));
+  EXPECT_TRUE(HasArg(1u, 4u, Variadic::UnsignedInteger(10u)));
+}
+
 TEST_F(ProtoTraceParserTest, LoadChromeBenchmarkMetadata) {
   static const char kName[] = "name";
   static const char kTag1[] = "tag1";
diff --git a/src/trace_processor/raw_table.cc b/src/trace_processor/raw_table.cc
index 33bf65c..501eee4 100644
--- a/src/trace_processor/raw_table.cc
+++ b/src/trace_processor/raw_table.cc
@@ -28,6 +28,7 @@
 #include "perfetto/trace/ftrace/ftrace.pbzero.h"
 #include "perfetto/trace/ftrace/ftrace_event.pbzero.h"
 #include "perfetto/trace/ftrace/sched.pbzero.h"
+#include "perfetto/trace/ftrace/workqueue.pbzero.h"
 
 namespace perfetto {
 namespace trace_processor {
@@ -140,7 +141,7 @@
     write_arg(SS::kPrevStateFieldNumber - 1, [writer](const Variadic& value) {
       PERFETTO_DCHECK(value.type == Variadic::Type::kInt);
       auto state = static_cast<uint16_t>(value.int_value);
-      writer->AppendString(ftrace_utils::TaskState(state).ToString().data());
+      writer->AppendString(ftrace_utils::TaskState(state).ToString('|').data());
     });
     writer->AppendLiteral(" ==>");
     write_arg(SS::kNextCommFieldNumber - 1, write_value);
@@ -190,13 +191,13 @@
     write_value_at_index(
         BT::kFlagsFieldNumber - 1, [writer](const Variadic& value) {
           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
-          writer->AppendHexInt(static_cast<uint32_t>(value.uint_value));
+          writer->AppendHexInt(value.uint_value);
         });
     writer->AppendString(" code=0x");
     write_value_at_index(
         BT::kCodeFieldNumber - 1, [writer](const Variadic& value) {
           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
-          writer->AppendHexInt(static_cast<uint32_t>(value.uint_value));
+          writer->AppendHexInt(value.uint_value);
         });
     return;
   } else if (event_name == "binder_transaction_alloc_buf") {
@@ -251,6 +252,67 @@
     writer->AppendChar(' ');
     writer->AppendString(str.c_str(), chars_to_print);
     return;
+  } else if (event_name == "sched_blocked_reason") {
+    using SBR = protos::pbzero::SchedBlockedReasonFtraceEvent;
+    write_arg(SBR::kPidFieldNumber - 1, write_value);
+    write_arg(SBR::kIoWaitFieldNumber - 1, write_value);
+    write_arg(SBR::kCallerFieldNumber - 1, [writer](const Variadic& value) {
+      PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+      writer->AppendHexInt(value.uint_value);
+    });
+    return;
+  } else if (event_name == "workqueue_activate_work") {
+    using WAW = protos::pbzero::WorkqueueActivateWorkFtraceEvent;
+    writer->AppendString(" work struct ");
+    write_value_at_index(WAW::kWorkFieldNumber - 1,
+                         [writer](const Variadic& value) {
+                           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+                           writer->AppendHexInt(value.uint_value);
+                         });
+    return;
+  } else if (event_name == "workqueue_execute_start") {
+    using WES = protos::pbzero::WorkqueueExecuteStartFtraceEvent;
+    writer->AppendString(" work struct ");
+    write_value_at_index(WES::kWorkFieldNumber - 1,
+                         [writer](const Variadic& value) {
+                           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+                           writer->AppendHexInt(value.uint_value);
+                         });
+    writer->AppendString(": function ");
+    write_value_at_index(WES::kFunctionFieldNumber - 1,
+                         [writer](const Variadic& value) {
+                           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+                           writer->AppendHexInt(value.uint_value);
+                         });
+    return;
+  } else if (event_name == "workqueue_execute_end") {
+    using WE = protos::pbzero::WorkqueueExecuteEndFtraceEvent;
+    writer->AppendString(" work struct ");
+    write_value_at_index(WE::kWorkFieldNumber - 1,
+                         [writer](const Variadic& value) {
+                           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+                           writer->AppendHexInt(value.uint_value);
+                         });
+    return;
+  } else if (event_name == "workqueue_queue_work") {
+    using WQW = protos::pbzero::WorkqueueQueueWorkFtraceEvent;
+    writer->AppendString(" work struct=");
+    write_value_at_index(WQW::kWorkFieldNumber - 1,
+                         [writer](const Variadic& value) {
+                           PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+                           writer->AppendHexInt(value.uint_value);
+                         });
+    write_arg(WQW::kFunctionFieldNumber - 1, [writer](const Variadic& value) {
+      PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+      writer->AppendHexInt(value.uint_value);
+    });
+    write_arg(WQW::kWorkqueueFieldNumber - 1, [writer](const Variadic& value) {
+      PERFETTO_DCHECK(value.type == Variadic::Type::kUint);
+      writer->AppendHexInt(value.uint_value);
+    });
+    write_value_at_index(WQW::kReqCpuFieldNumber - 1, write_value);
+    write_value_at_index(WQW::kCpuFieldNumber - 1, write_value);
+    return;
   }
 
   uint32_t arg = 0;
diff --git a/src/trace_processor/trace_storage.h b/src/trace_processor/trace_storage.h
index c3463be..d65b435 100644
--- a/src/trace_processor/trace_storage.h
+++ b/src/trace_processor/trace_storage.h
@@ -387,13 +387,61 @@
       return slice_count() - 1;
     }
 
-    void set_thread_duration_ns(uint32_t index, int64_t thread_duration_ns) {
-      thread_duration_ns_[index] = thread_duration_ns;
+    uint32_t slice_count() const {
+      return static_cast<uint32_t>(slice_ids_.size());
     }
 
-    void set_thread_instruction_delta(uint32_t index,
-                                      int64_t thread_instruction_delta) {
-      thread_instruction_deltas_[index] = thread_instruction_delta;
+    const std::deque<uint32_t>& slice_ids() const { return slice_ids_; }
+    const std::deque<int64_t>& thread_timestamp_ns() const {
+      return thread_timestamp_ns_;
+    }
+    const std::deque<int64_t>& thread_duration_ns() const {
+      return thread_duration_ns_;
+    }
+    const std::deque<int64_t>& thread_instruction_counts() const {
+      return thread_instruction_counts_;
+    }
+    const std::deque<int64_t>& thread_instruction_deltas() const {
+      return thread_instruction_deltas_;
+    }
+
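+    // Returns the row index for |slice_id|, or nullopt if the slice has no
+    // entry in this table. std::lower_bound requires |slice_ids_| to be
+    // sorted, which holds as long as slices are added in increasing id order.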
+    base::Optional<uint32_t> FindRowForSliceId(uint32_t slice_id) const {
+      auto it =
+          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
+      if (it != slice_ids().end() && *it == slice_id) {
+        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
+      }
+      return base::nullopt;
+    }
+
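+    // Fills in the thread-time duration of |slice_id| once its end thread
+    // timestamp is known.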
+    void UpdateThreadDurationForSliceId(uint32_t slice_id,
+                                        int64_t end_thread_timestamp_ns) {
+      uint32_t row = *FindRowForSliceId(slice_id);
+      int64_t begin_ns = thread_timestamp_ns_[row];
+      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
+    }
+
+   private:
+    std::deque<uint32_t> slice_ids_;
+    std::deque<int64_t> thread_timestamp_ns_;
+    std::deque<int64_t> thread_duration_ns_;
+    std::deque<int64_t> thread_instruction_counts_;
+    std::deque<int64_t> thread_instruction_deltas_;
+  };
+
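+  // Additional thread-time and instruction-count attributes for slices that
+  // live on virtual tracks, analogous to ThreadSlices above.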
+  class VirtualTrackSlices {
+   public:
+    inline uint32_t AddVirtualTrackSlice(uint32_t slice_id,
+                                         int64_t thread_timestamp_ns,
+                                         int64_t thread_duration_ns,
+                                         int64_t thread_instruction_count,
+                                         int64_t thread_instruction_delta) {
+      slice_ids_.emplace_back(slice_id);
+      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
+      thread_duration_ns_.emplace_back(thread_duration_ns);
+      thread_instruction_counts_.emplace_back(thread_instruction_count);
+      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
+      return slice_count() - 1;
     }
 
     uint32_t slice_count() const {
@@ -1041,6 +1089,13 @@
   const ThreadSlices& thread_slices() const { return thread_slices_; }
   ThreadSlices* mutable_thread_slices() { return &thread_slices_; }
 
+  const VirtualTrackSlices& virtual_track_slices() const {
+    return virtual_track_slices_;
+  }
+  VirtualTrackSlices* mutable_virtual_track_slices() {
+    return &virtual_track_slices_;
+  }
+
   const CounterDefinitions& counter_definitions() const {
     return counter_definitions_;
   }
@@ -1161,6 +1216,10 @@
   // Additional attributes for threads slices (sub-type of NestableSlices).
   ThreadSlices thread_slices_;
 
+  // Additional attributes for virtual track slices (sub-type of
+  // NestableSlices).
+  VirtualTrackSlices virtual_track_slices_;
+
   // The type of counters in the trace. Can be thought of as the "metadata".
   CounterDefinitions counter_definitions_;
 
diff --git a/test/BUILD.gn b/test/BUILD.gn
index 8805cec..69a7513 100644
--- a/test/BUILD.gn
+++ b/test/BUILD.gn
@@ -110,6 +110,7 @@
   sources = [
     "fake_producer.cc",
     "fake_producer.h",
+    "task_runner_thread_delegates.cc",
     "task_runner_thread_delegates.h",
   ]
 }
diff --git a/test/end_to_end_shared_memory_fuzzer.cc b/test/end_to_end_shared_memory_fuzzer.cc
index 3ac92bb..c92842b 100644
--- a/test/end_to_end_shared_memory_fuzzer.cc
+++ b/test/end_to_end_shared_memory_fuzzer.cc
@@ -98,7 +98,7 @@
   void StopDataSource(DataSourceInstanceID) override {}
   void OnTracingSetup() override {}
   void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override {}
-  void ClearIncrementalState(const DataSourceInstanceID*, size_t) {}
+  void ClearIncrementalState(const DataSourceInstanceID*, size_t) override {}
 
  private:
   const std::string name_;
diff --git a/test/fake_producer.cc b/test/fake_producer.cc
index 944c8cb..0df0b71 100644
--- a/test/fake_producer.cc
+++ b/test/fake_producer.cc
@@ -19,7 +19,6 @@
 #include <condition_variable>
 #include <mutex>
 
-#include <gtest/gtest.h>
 #include "perfetto/base/logging.h"
 #include "perfetto/ext/base/time.h"
 #include "perfetto/ext/base/utils.h"
@@ -58,7 +57,7 @@
 
 void FakeProducer::OnDisconnect() {
   PERFETTO_DCHECK_THREAD(thread_checker_);
-  FAIL() << "Producer unexpectedly disconnected from the service";
+  PERFETTO_FATAL("Producer unexpectedly disconnected from the service");
 }
 
 void FakeProducer::SetupDataSource(DataSourceInstanceID,
diff --git a/test/task_runner_thread_delegates.cc b/test/task_runner_thread_delegates.cc
new file mode 100644
index 0000000..291482f
--- /dev/null
+++ b/test/task_runner_thread_delegates.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test/task_runner_thread_delegates.h"
+
+namespace perfetto {
+
+ServiceDelegate::~ServiceDelegate() = default;
+ProbesProducerDelegate::~ProbesProducerDelegate() = default;
+FakeProducerDelegate::~FakeProducerDelegate() = default;
+
+}  // namespace perfetto
diff --git a/test/task_runner_thread_delegates.h b/test/task_runner_thread_delegates.h
index dc19c0a..bea384a9 100644
--- a/test/task_runner_thread_delegates.h
+++ b/test/task_runner_thread_delegates.h
@@ -29,7 +29,7 @@
   ServiceDelegate(const std::string& producer_socket,
                   const std::string& consumer_socket)
       : producer_socket_(producer_socket), consumer_socket_(consumer_socket) {}
-  ~ServiceDelegate() override = default;
+  ~ServiceDelegate() override;
 
   void Initialize(base::TaskRunner* task_runner) override {
     svc_ = ServiceIPCHost::CreateInstance(task_runner);
@@ -49,7 +49,7 @@
  public:
   ProbesProducerDelegate(const std::string& producer_socket)
       : producer_socket_(producer_socket) {}
-  ~ProbesProducerDelegate() override = default;
+  ~ProbesProducerDelegate() override;
 
   void Initialize(base::TaskRunner* task_runner) override {
     producer_.reset(new ProbesProducer);
@@ -69,7 +69,7 @@
       : producer_socket_(producer_socket),
         setup_callback_(std::move(setup_callback)),
         connect_callback_(std::move(connect_callback)) {}
-  ~FakeProducerDelegate() override = default;
+  ~FakeProducerDelegate() override;
 
   void Initialize(base::TaskRunner* task_runner) override {
     producer_.reset(new FakeProducer("android.perfetto.FakeProducer"));
diff --git a/test/test_helper.cc b/test/test_helper.cc
index a64794e..7ad5cba 100644
--- a/test/test_helper.cc
+++ b/test/test_helper.cc
@@ -16,7 +16,6 @@
 
 #include "test/test_helper.h"
 
-#include <gtest/gtest.h>
 #include "perfetto/ext/traced/traced.h"
 #include "perfetto/ext/tracing/core/trace_packet.h"
 #include "test/task_runner_thread_delegates.h"
@@ -52,7 +51,7 @@
 }
 
 void TestHelper::OnDisconnect() {
-  FAIL() << "Consumer unexpectedly disconnected from the service";
+  PERFETTO_FATAL("Consumer unexpectedly disconnected from the service");
 }
 
 void TestHelper::OnTracingDisabled() {
@@ -62,14 +61,14 @@
 void TestHelper::OnTraceData(std::vector<TracePacket> packets, bool has_more) {
   for (auto& encoded_packet : packets) {
     protos::TracePacket packet;
-    ASSERT_TRUE(encoded_packet.Decode(&packet));
+    PERFETTO_CHECK(encoded_packet.Decode(&packet));
     if (packet.has_clock_snapshot() || packet.has_trace_config() ||
         packet.has_trace_stats() || !packet.synchronization_marker().empty() ||
         packet.has_system_info()) {
       continue;
     }
-    ASSERT_EQ(protos::TracePacket::kTrustedUid,
-              packet.optional_trusted_uid_case());
+    PERFETTO_CHECK(packet.optional_trusted_uid_case() ==
+                   protos::TracePacket::kTrustedUid);
     trace_.push_back(std::move(packet));
   }
 
diff --git a/test/trace_processor/gpu_render_stages.out b/test/trace_processor/gpu_render_stages.out
new file mode 100644
index 0000000..551fe5b
--- /dev/null
+++ b/test/trace_processor/gpu_render_stages.out
@@ -0,0 +1,10 @@
+"ts","dur","ref","ref_type","name","depth","arg_set_id"
+10,5,1,"gpu","stage 1",0,0
+20,5,0,"gpu","stage 2",0,0
+30,5,1,"gpu","stage 0",0,0
+40,5,0,"gpu","stage 1",0,0
+50,5,1,"gpu","stage 2",0,0
+60,5,0,"gpu","stage 0",0,0
+70,5,1,"gpu","stage 1",0,0
+80,5,0,"gpu","stage 2",0,0
+90,5,1,"gpu","stage 0",0,0
diff --git a/test/trace_processor/gpu_render_stages.sql b/test/trace_processor/gpu_render_stages.sql
new file mode 100644
index 0000000..e55094e
--- /dev/null
+++ b/test/trace_processor/gpu_render_stages.sql
@@ -0,0 +1 @@
+select "ts","dur","ref","ref_type","name","depth","arg_set_id" from internal_slice order by "ts";
diff --git a/test/trace_processor/index b/test/trace_processor/index
index b526566..f4a1805 100644
--- a/test/trace_processor/index
+++ b/test/trace_processor/index
@@ -109,5 +109,6 @@
 # Unsigned integers
 print_systrace_unsigned.py print_systrace.sql print_systrace_unsigned.out
 
-# GPU counters
+# GPU trace tests
 ../data/gpu_counters.pb gpu_counters.sql gpu_counters.out
+../data/gpu_trace.pb gpu_render_stages.sql gpu_render_stages.out
diff --git a/tools/heap_profile b/tools/heap_profile
index 09a6911..0579d99 100755
--- a/tools/heap_profile
+++ b/tools/heap_profile
@@ -148,8 +148,10 @@
                       default=8 * 1048576)
   parser.add_argument("--block-client", help="When buffer is full, block the "
                       "client to wait for buffer space. Use with caution as "
-                      "this can significantly slow down the client.",
-                      action="store_true")
+                      "this can significantly slow down the client. "
+                      "This is the default", action="store_true")
+  parser.add_argument("--no-block-client", help="When buffer is full, stop the "
+                      "profile early.", action="store_true")
   parser.add_argument("--idle-allocations", help="Keep track of how many "
                       "bytes were unused since the last dump, per "
                       "callstack", action="store_true")
@@ -167,6 +169,10 @@
   args = parser.parse_args()
 
   fail = False
+  if args.block_client and args.no_block_client:
+    print("FATAL: Both block-client and no-block-client given.",
+            file=sys.stderr)
+    fail = True
   if args.pid is None and args.name is None:
     print("FATAL: Neither PID nor NAME given.", file=sys.stderr)
     fail = True
@@ -187,7 +193,7 @@
     fail = True
 
   target_cfg = ""
-  if args.block_client:
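+  # Blocking the client is now the default; --no-block-client opts out and
+  # stops the profile early when the buffer fills up instead.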
+  if not args.no_block_client:
     target_cfg += "block_client: true\n"
   if args.idle_allocations:
     target_cfg += "idle_allocations: true\n"
diff --git a/tools/install-build-deps b/tools/install-build-deps
index fb8263c..9ad78b3 100755
--- a/tools/install-build-deps
+++ b/tools/install-build-deps
@@ -195,8 +195,8 @@
 
   # Example traces for regression tests.
   ('buildtools/test_data.zip',
-   'https://storage.googleapis.com/perfetto/test-data-20190708-155403.zip',
-   'a9b260a2c3118caa4209e4097da6a851c8fd5797',
+   'https://storage.googleapis.com/perfetto/test-data-20190710-183108.zip',
+   'a37e5e7153ae5f0ade0a413add1404b184c57fcc',
    'all',
   ),
 
diff --git a/tools/trace_to_text/main.cc b/tools/trace_to_text/main.cc
index a320124..47c3af3 100644
--- a/tools/trace_to_text/main.cc
+++ b/tools/trace_to_text/main.cc
@@ -16,6 +16,8 @@
 
 #include <fstream>
 #include <iostream>
+#include <limits>
+#include <vector>
 
 #include "perfetto/base/logging.h"
 #include "tools/trace_to_text/trace_to_profile.h"
@@ -37,7 +39,7 @@
 
 int Usage(const char* argv0) {
   printf(
-      "Usage: %s systrace|json|text|profile [trace.pb] "
+      "Usage: %s systrace|json|text|profile [--truncate] [trace.pb] "
       "[trace.txt]\n",
       argv0);
   return 1;
@@ -46,20 +48,29 @@
 }  // namespace
 
 int main(int argc, char** argv) {
+  uint64_t file_size_limit = std::numeric_limits<uint64_t>::max();
+  std::vector<const char*> positional_args;
+  bool should_truncate_trace = false;
   for (int i = 1; i < argc; i++) {
     if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--version") == 0) {
       printf("%s\n", PERFETTO_GET_GIT_REVISION());
       return 0;
+    } else if (strcmp(argv[i], "-t") == 0 ||
+               strcmp(argv[i], "--truncate") == 0) {
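+      // --truncate caps the amount of trace data read at 50MB; only the
+      // json and systrace formats support it.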
+      should_truncate_trace = true;
+      file_size_limit = 1024u * 1024u * 50u;
+    } else {
+      positional_args.push_back(argv[i]);
     }
   }
 
-  if (argc < 2)
+  if (positional_args.size() < 1)
     return Usage(argv[0]);
 
   std::istream* input_stream;
   std::ifstream file_istream;
-  if (argc > 2) {
-    const char* file_path = argv[2];
+  if (positional_args.size() > 1) {
+    const char* file_path = positional_args[1];
     file_istream.open(file_path, std::ios_base::in | std::ios_base::binary);
     if (!file_istream.is_open())
       PERFETTO_FATAL("Could not open %s", file_path);
@@ -76,8 +87,8 @@
 
   std::ostream* output_stream;
   std::ofstream file_ostream;
-  if (argc > 3) {
-    const char* file_path = argv[3];
+  if (positional_args.size() > 2) {
+    const char* file_path = positional_args[2];
     file_ostream.open(file_path, std::ios_base::out | std::ios_base::trunc);
     if (!file_ostream.is_open())
       PERFETTO_FATAL("Could not open %s", file_path);
@@ -86,14 +97,22 @@
     output_stream = &std::cout;
   }
 
-  std::string format(argv[1]);
+  std::string format(positional_args[0]);
 
   if (format == "json")
     return perfetto::trace_to_text::TraceToSystrace(input_stream, output_stream,
+                                                    file_size_limit,
                                                     /*wrap_in_json=*/true);
   if (format == "systrace")
     return perfetto::trace_to_text::TraceToSystrace(input_stream, output_stream,
+                                                    file_size_limit,
                                                     /*wrap_in_json=*/false);
+  if (should_truncate_trace) {
+    PERFETTO_ELOG(
+        "--truncate is unsupported for text|profile|symbolize format.");
+    return 1;
+  }
+
   if (format == "text")
     return perfetto::trace_to_text::TraceToText(input_stream, output_stream);
 
diff --git a/tools/trace_to_text/trace_to_systrace.cc b/tools/trace_to_text/trace_to_systrace.cc
index 53a873b..820ce69 100644
--- a/tools/trace_to_text/trace_to_systrace.cc
+++ b/tools/trace_to_text/trace_to_systrace.cc
@@ -174,6 +174,7 @@
 
 int TraceToSystrace(std::istream* input,
                     std::ostream* output,
+                    uint64_t file_size_limit,
                     bool wrap_in_json) {
   trace_processor::Config config;
   std::unique_ptr<trace_processor::TraceProcessor> tp =
@@ -190,7 +191,8 @@
   constexpr int kStderrRate = 128;
 #endif
   uint64_t file_size = 0;
-  for (int i = 0;; i++) {
+
+  for (int i = 0; file_size < file_size_limit; i++) {
     if (i % kStderrRate == 0) {
       fprintf(stderr, "Loading trace %.2f MB" PROGRESS_CHAR, file_size / 1.0e6);
       fflush(stderr);
@@ -273,14 +275,32 @@
     if (wrap_in_json) {
       for (uint32_t i = 0; line[i] != '\0'; i++) {
         char c = line[i];
-        if (c == '\n') {
-          writer->AppendLiteral("\\n");
-          continue;
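+        // Escape characters that would otherwise produce invalid JSON inside
+        // the generated string (control characters, backslash and quotes).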
+        switch (c) {
+          case '\n':
+            writer->AppendLiteral("\\n");
+            break;
+          case '\f':
+            writer->AppendLiteral("\\f");
+            break;
+          case '\b':
+            writer->AppendLiteral("\\b");
+            break;
+          case '\r':
+            writer->AppendLiteral("\\r");
+            break;
+          case '\t':
+            writer->AppendLiteral("\\t");
+            break;
+          case '\\':
+            writer->AppendLiteral("\\\\");
+            break;
+          case '"':
+            writer->AppendLiteral("\\\"");
+            break;
+          default:
+            writer->AppendChar(c);
+            break;
         }
-
-        if (c == '\\' || c == '"')
-          writer->AppendChar('\\');
-        writer->AppendChar(c);
       }
       writer->AppendChar('\\');
       writer->AppendChar('n');
diff --git a/tools/trace_to_text/trace_to_systrace.h b/tools/trace_to_text/trace_to_systrace.h
index fbb6b38..366d10c 100644
--- a/tools/trace_to_text/trace_to_systrace.h
+++ b/tools/trace_to_text/trace_to_systrace.h
@@ -24,12 +24,9 @@
 
 int TraceToSystrace(std::istream* input,
                     std::ostream* output,
+                    uint64_t file_size_limit,
                     bool wrap_in_json);
 
-int TraceToSystraceLegacy(std::istream* input,
-                          std::ostream* output,
-                          bool wrap_in_json);
-
 }  // namespace trace_to_text
 }  // namespace perfetto
 
diff --git a/ui/src/common/actions.ts b/ui/src/common/actions.ts
index c7263de..f7dec2e 100644
--- a/ui/src/common/actions.ts
+++ b/ui/src/common/actions.ts
@@ -83,9 +83,10 @@
     state.videoEnabled = true;
   },
 
-  convertTraceToJson(_: StateDraft, args: {file: File}): void {
-    ConvertTrace(args.file);
-  },
+  convertTraceToJson(_: StateDraft, args: {file: File, truncate: boolean}):
+      void {
+        ConvertTrace(args.file, args.truncate);
+      },
 
   openTraceFromUrl(state: StateDraft, args: {url: string}): void {
     clearTraceState(state);
@@ -113,7 +114,8 @@
 
   addTrack(state: StateDraft, args: {
     id?: string; engineId: string; kind: string; name: string;
-    trackGroup?: string; config: {};
+    trackGroup?: string;
+    config: {};
   }): void {
     const id = args.id !== undefined ? args.id : `${state.nextId++}`;
     state.tracks[id] = {
@@ -293,7 +295,9 @@
     }
   },
 
-  addNote(state: StateDraft, args: {timestamp: number, color: string, isMovie: boolean}): void {
+  addNote(
+      state: StateDraft,
+      args: {timestamp: number, color: string, isMovie: boolean}): void {
     const id = `${state.nextId++}`;
     state.notes[id] = {
       id,
diff --git a/ui/src/controller/trace_converter.ts b/ui/src/controller/trace_converter.ts
index 865ee50..d45970b 100644
--- a/ui/src/controller/trace_converter.ts
+++ b/ui/src/controller/trace_converter.ts
@@ -17,7 +17,7 @@
 
 import {globals} from './globals';
 
-export function ConvertTrace(trace: Blob) {
+export function ConvertTrace(trace: Blob, truncate: boolean) {
   const mod = trace_to_text({
     noInitialRun: true,
     locateFile: (s: string) => s,
@@ -26,7 +26,11 @@
     onRuntimeInitialized: () => {
       updateStatus('Converting trace');
       const outPath = '/trace.json';
-      mod.callMain(['json', '/fs/trace.proto', outPath]);
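+      // Pass --truncate through to the trace_to_text module so that only the
+      // first ~50MB of the trace is converted.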
+      if (truncate) {
+        mod.callMain(['json', '--truncate', '/fs/trace.proto', outPath]);
+      } else {
+        mod.callMain(['json', '/fs/trace.proto', outPath]);
+      }
       updateStatus('Trace conversion completed');
       const fsNode = mod.FS.lookupPath(outPath).node;
       const data = fsNode.contents.buffer;
diff --git a/ui/src/frontend/sidebar.ts b/ui/src/frontend/sidebar.ts
index 285d118..f56d719 100644
--- a/ui/src/frontend/sidebar.ts
+++ b/ui/src/frontend/sidebar.ts
@@ -102,11 +102,6 @@
     expanded: true,
     items: [
       {t: 'Open trace file', a: popupFileSelectionDialog, i: 'folder_open'},
-      {
-        t: 'Open with legacy UI',
-        a: popupFileSelectionDialogOldUI,
-        i: 'folder_open'
-      },
       {t: 'Record new trace', a: navigateRecord, i: 'fiber_smart_record'},
       {t: 'Show timeline', a: navigateViewer, i: 'line_style'},
       {
@@ -124,6 +119,23 @@
     ],
   },
   {
+    title: 'Legacy UI',
+    expanded: true,
+    summary: 'Open trace with legacy UI',
+    items: [
+      {
+        t: 'Open with legacy UI',
+        a: popupFileSelectionDialogOldUI,
+        i: 'folder_open'
+      },
+      {
+        t: 'Truncate and open with legacy UI',
+        a: popupFileSelectionDialogOldUITruncate,
+        i: 'flip'
+      },
+    ],
+  },
+  {
     title: 'Example Traces',
     expanded: true,
     summary: 'Open an example trace',
@@ -223,16 +235,26 @@
   e.preventDefault();
   delete getFileElement().dataset['useCatapultLegacyUi'];
   delete getFileElement().dataset['video'];
+  delete getFileElement().dataset['truncate'];
   getFileElement().click();
 }
 
 function popupFileSelectionDialogOldUI(e: Event) {
   e.preventDefault();
   delete getFileElement().dataset['video'];
+  delete getFileElement().dataset['truncate'];
   getFileElement().dataset['useCatapultLegacyUi'] = '1';
   getFileElement().click();
 }
 
+function popupFileSelectionDialogOldUITruncate(e: Event) {
+  e.preventDefault();
+  delete getFileElement().dataset['video'];
+  getFileElement().dataset['useCatapultLegacyUi'] = '1';
+  getFileElement().dataset['truncate'] = '1';
+  getFileElement().click();
+}
+
 function popupVideoSelectionDialog(e: Event) {
   e.preventDefault();
   delete getFileElement().dataset['useCatapultLegacyUi'];
@@ -254,15 +276,30 @@
   }
   if (!e.target.files) return;
   const file = e.target.files[0];
+  // Reset the value so that selecting the same file again still fires the
+  // onchange event.
+  e.target.value = '';
 
   globals.frontendLocalState.localOnlyMode = false;
 
   if (e.target.dataset['useCatapultLegacyUi'] === '1') {
-    // Switch back the old catapult UI.
+    // Switch back to the old catapult UI.
     if (isLegacyTrace(file.name)) {
       openFileWithLegacyTraceViewer(file);
     } else {
-      globals.dispatch(Actions.convertTraceToJson({file}));
+      if (e.target.dataset['truncate'] === '1') {
+        globals.dispatch(Actions.convertTraceToJson({file, truncate: true}));
+        return;
+      } else if (file.size > 1024 * 1024 * 50) {
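+        // Traces above the 50MB truncation threshold may not load in the
+        // legacy UI, so ask before converting them in full.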
+        const size = Math.round(file.size / (1024 * 1024));
+        const result = confirm(
+            `This trace is ${size} MB; opening it in ` +
+            `the legacy UI may fail.\nPress 'OK' to attempt to open this ` +
+            `trace, or press 'Cancel' and use the 'Truncate' button ` +
+            `to load just the first 50 MB.\nMore options can be found at ` +
+            `go/opening-large-traces.`);
+        if (!result) return;
+      }
+      globals.dispatch(Actions.convertTraceToJson({file, truncate: false}));
     }
     return;
   }