perfetto: consolidate all Python libraries into a single folder
This CL consolidates all the Python libraries written over the past
couple of years into a single top-level python/ directory. This makes
it much easier to reason about this code and to ring-fence things like
build targets, and it also makes dealing with imports far simpler.
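
For example, every library is now importable from a single root (an
illustrative sketch; REPO_ROOT is a placeholder for the checkout path):

  import os
  import sys

  REPO_ROOT = '/path/to/perfetto'  # placeholder
  # python/ is the single root for all the Perfetto Python packages.
  sys.path.append(os.path.join(REPO_ROOT, 'python'))

  from perfetto.trace_processor import TraceProcessor
  from perfetto.batch_trace_processor.api import BatchTraceProcessor
  from perfetto.slice_breakdown import compute_breakdown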
Change-Id: Ia0d7637c0993dde51af19ce052e18e4773349596
Bug: 180499808
diff --git a/tools/batch_trace_processor/main.py b/tools/batch_trace_processor/main.py
deleted file mode 100644
index 651247d..0000000
--- a/tools/batch_trace_processor/main.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Given a trace file, gives the self-time of userspace slices broken
-down by process, thread and thread state.
-"""
-
-import argparse
-import cmd
-import logging
-import numpy as np
-import pandas as pd
-import plotille
-
-from perfetto.batch_trace_processor.api import BatchTraceProcessor, BatchTraceProcessorConfig
-from perfetto.trace_processor import TraceProcessorException, TraceProcessorConfig
-from typing import List
-
-
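-# Interactive shell (built on cmd.Cmd) whose commands each run an ad-hoc
-# SQL query across every loaded trace.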
-class TpBatchShell(cmd.Cmd):
-
- def __init__(self, files: List[str], batch_tp: BatchTraceProcessor):
- super().__init__()
- self.files = files
- self.batch_tp = batch_tp
-
- def do_table(self, arg: str):
- try:
- data = self.batch_tp.query_and_flatten(arg)
- print(data)
- except TraceProcessorException as ex:
- logging.error("Query failed: {}".format(ex))
-
- def do_histogram(self, arg: str):
- try:
- data = self.batch_tp.query_single_result(arg)
- print(plotille.histogram(data))
- self.print_percentiles(data)
- except TraceProcessorException as ex:
- logging.error("Query failed: {}".format(ex))
-
- def do_vhistogram(self, arg: str):
- try:
- data = self.batch_tp.query_single_result(arg)
- print(plotille.hist(data))
- self.print_percentiles(data)
- except TraceProcessorException as ex:
- logging.error("Query failed: {}".format(ex))
-
- def do_count(self, arg: str):
- try:
- data = self.batch_tp.query_single_result(arg)
- counts = dict()
- for i in data:
- counts[i] = counts.get(i, 0) + 1
- print(counts)
- except TraceProcessorException as ex:
- logging.error("Query failed: {}".format(ex))
-
- def do_close(self, _):
- return True
-
- def do_quit(self, _):
- return True
-
- def do_EOF(self, _):
- print("")
- return True
-
- def print_percentiles(self, data):
- percentiles = [25, 50, 75, 95, 99, 99.9]
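-    # interpolation='nearest' makes np.percentile return actual data points,
-    # so each percentile can be mapped back to the trace which produced it.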
- nearest = np.percentile(data, percentiles, interpolation='nearest')
- logging.info("Representative traces for percentiles")
- for i, near in enumerate(nearest):
- print("{}%: {}".format(percentiles[i], self.files[data.index(near)]))
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--shell-path', default=None)
- parser.add_argument('--verbose', action='store_true', default=False)
- parser.add_argument('--file-list', default=None)
- parser.add_argument('--query-file', default=None)
-  parser.add_argument('--interactive', action='store_true', default=False)
- parser.add_argument('files', nargs='*')
- args = parser.parse_args()
-
- logging.basicConfig(level=logging.DEBUG)
-
- files = args.files
- if args.file_list:
- with open(args.file_list, 'r') as f:
- files += f.read().splitlines()
-
-  if not files:
-    logging.error("At least one file must be specified in files or file list")
-    return 1
-
- logging.info('Loading traces...')
- config = BatchTraceProcessorConfig(
- tp_config=TraceProcessorConfig(
- bin_path=args.shell_path,
- verbose=args.verbose,
- ))
-
- with BatchTraceProcessor(files, config) as batch_tp:
- if args.query_file:
- logging.info('Running query file...')
-
- with open(args.query_file, 'r') as f:
- queries_str = f.read()
-
- queries = [q.strip() for q in queries_str.split(";\n")]
- for q in queries[:-1]:
- batch_tp.query(q)
-
- res = batch_tp.query_and_flatten(queries[-1])
- print(res.to_csv(index=False))
-
- if args.interactive or not args.query_file:
- try:
- TpBatchShell(files, batch_tp).cmdloop()
- except KeyboardInterrupt:
- pass
-
- logging.info("Closing; please wait...")
-
-
-if __name__ == '__main__':
- exit(main())
diff --git a/tools/batch_trace_processor/perfetto/batch_trace_processor/__init__.py b/tools/batch_trace_processor/perfetto/batch_trace_processor/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tools/batch_trace_processor/perfetto/batch_trace_processor/__init__.py
+++ /dev/null
diff --git a/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py b/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py
deleted file mode 100644
index dcd640c..0000000
--- a/tools/batch_trace_processor/perfetto/batch_trace_processor/api.py
+++ /dev/null
@@ -1,309 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Contains classes for BatchTraceProcessor API."""
-
-from concurrent.futures.thread import ThreadPoolExecutor
-import dataclasses as dc
-import multiprocessing
-from typing import Any, Callable, Dict, Optional, Tuple, Union, List
-
-import pandas as pd
-
-from perfetto.trace_processor import LoadableTrace
-from perfetto.trace_processor import TraceProcessor
-from perfetto.trace_processor import TraceProcessorException
-from perfetto.trace_processor import TraceProcessorConfig
-
-
-@dc.dataclass
-class BatchLoadableTrace:
- trace: LoadableTrace
- args: Dict[str, str]
-
-
-@dc.dataclass
-class BatchTraceProcessorConfig:
- TraceProvider = Callable[[str], List[
- Union[LoadableTrace, BatchLoadableTrace]]]
-
- tp_config: TraceProcessorConfig
-
- query_executor: Optional[ThreadPoolExecutor]
- load_executor: Optional[ThreadPoolExecutor]
-
- trace_provider: TraceProvider
-
- def __default_trace_provider(custom_string: str):
- del custom_string
- raise TraceProcessorException(
- 'Passed a string to batch trace processor constructor without '
- 'a trace provider being registered.')
-
- def __init__(self,
- tp_config: TraceProcessorConfig = TraceProcessorConfig(),
- query_executor: Optional[ThreadPoolExecutor] = None,
- load_executor: Optional[ThreadPoolExecutor] = None,
- trace_provider: TraceProvider = __default_trace_provider):
- self.tp_config = tp_config
-
- self.query_executor = query_executor
- self.load_executor = load_executor
-
- self.trace_provider = trace_provider
-
- try:
- # This is the only place in batch trace processor which should import
- # from a "vendor" namespace - the purpose of this code is to allow
- # for users to set their own "default" config for batch trace processor
- # without needing to specify the config in every place when batch
- # trace processor is used.
- from .vendor import override_batch_tp_config
- override_batch_tp_config(self)
- except ModuleNotFoundError:
- pass
-
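-# For reference, a vendor override module (hypothetical contents shown
-# here) lives at perfetto/batch_trace_processor/vendor.py:
-#
-#   def override_batch_tp_config(config):
-#     # e.g. default every trace processor to a custom shell binary.
-#     config.tp_config = TraceProcessorConfig(bin_path='/path/to/shell')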
-
-class BatchTraceProcessor:
- """Run ad-hoc SQL queries across many Perfetto traces.
-
- Usage:
- with BatchTraceProcessor(traces) as btp:
- dfs = btp.query('select * from slice')
- for df in dfs:
- print(df)
- """
-
- def __init__(
- self,
- traces: Union[str, List[Union[LoadableTrace, BatchLoadableTrace]]],
- config: BatchTraceProcessorConfig = BatchTraceProcessorConfig()):
- """Creates a batch trace processor instance.
-
- BatchTraceProcessor is the blessed way of running ad-hoc queries in
- Python across many traces.
-
- Args:
- traces: Either a list of traces or a custom string which will be
- converted to a list of traces.
-
- If a list, each item can be one of the following types:
- 1) path to a trace file to open and read
- 2) a file like object (file, io.BytesIO or similar) to read
- 3) a generator yielding bytes
- 4) a BatchLoadableTrace object; this is basically a wrapper around
- one of the above types plus an args field; see |query_and_flatten|
- for the motivation for the args field.
-
- If a string, it is passed to BatchTraceProcessorConfig.trace_provider to
- convert to a list of traces; the default implementation of this
- function just throws an exception so an implementation must be provided
- if strings will be passed.
- config: configuration options which customize functionality of batch
- trace processor and underlying trace processors.
- """
-
- def _create_batch_trace(x: Union[LoadableTrace, BatchLoadableTrace]
- ) -> BatchLoadableTrace:
- if isinstance(x, BatchLoadableTrace):
- return x
- return BatchLoadableTrace(trace=x, args={})
-
- def create_tp(trace: BatchLoadableTrace) -> TraceProcessor:
- return TraceProcessor(trace=trace.trace, config=config.tp_config)
-
- if isinstance(traces, str):
- trace_list = config.trace_provider(traces)
- else:
- trace_list = traces
-
- batch_traces = [_create_batch_trace(t) for t in trace_list]
-
- # As trace processor is completely CPU bound, it makes sense to just
- # max out the CPUs available.
- query_executor = config.query_executor or ThreadPoolExecutor(
- max_workers=multiprocessing.cpu_count())
-    load_executor = config.load_executor or query_executor
-
- self.tps = None
- self.closed = False
- self.query_executor = query_executor
- self.args = [t.args for t in batch_traces]
-    self.tps = list(load_executor.map(create_tp, batch_traces))
-
- def metric(self, metrics: List[str]):
- """Computes the provided metrics.
-
- The computation happens in parallel across all the traces.
-
- Args:
- metrics: A list of valid metrics as defined in TraceMetrics
-
- Returns:
- A list of TraceMetric protos (one for each trace).
- """
- return self.execute(lambda tp: tp.metric(metrics))
-
- def query(self, sql: str):
- """Executes the provided SQL statement (returning a single row).
-
- The execution happens in parallel across all the traces.
-
- Args:
- sql: The SQL statement to execute.
-
- Returns:
- A list of Pandas dataframes with the result of executing the query (one
- per trace).
-
- Raises:
- TraceProcessorException: An error occurred running the query.
- """
- return self.execute(lambda tp: tp.query(sql).as_pandas_dataframe())
-
- def query_and_flatten(self, sql: str):
- """Executes the provided SQL statement and flattens the result.
-
- The execution happens in parallel across all the traces and the
- resulting Pandas dataframes are flattened into a single dataframe.
-
- Args:
- sql: The SQL statement to execute.
-
- Returns:
- A concatenated Pandas dataframe containing the result of executing the
- query across all the traces.
-
- If |BatchLoadableTrace| objects were passed to the constructor, the
- contents of the |args| dictionary will also be emitted as extra columns
- (key being column name, value being the value in the dataframe).
-
- For example:
- traces = [BatchLoadableTrace(trace='/tmp/path', args={"foo": "bar"})]
- with BatchTraceProcessor(traces) as btp:
- df = btp.query_and_flatten('select count(1) as cnt from slice')
-
- Then df will look like this:
- cnt foo
- 100 bar
-
- Raises:
- TraceProcessorException: An error occurred running the query.
- """
- return self.execute_and_flatten(lambda tp: tp.query(sql).
- as_pandas_dataframe())
-
- def query_single_result(self, sql: str):
- """Executes the provided SQL statement (returning a single row).
-
- The execution happens in parallel across all the traces.
-
- Args:
- sql: The SQL statement to execute. This statement should return exactly
- one row on any trace.
-
- Returns:
- A list of values with the result of executing the query (one per trace).
-
- Raises:
- TraceProcessorException: An error occurred running the query or more than
- one result was returned.
- """
-
- def query_single_result_inner(tp):
- df = tp.query(sql).as_pandas_dataframe()
- if len(df.index) != 1:
- raise TraceProcessorException("Query should only return a single row")
-
- if len(df.columns) != 1:
- raise TraceProcessorException(
- "Query should only return a single column")
-
- return df.iloc[0, 0]
-
- return self.execute(query_single_result_inner)
-
- def execute(self, fn: Callable[[TraceProcessor], Any]) -> List[Any]:
- """Executes the provided function.
-
- The execution happens in parallel across all the trace processor instances
- owned by this object.
-
- Args:
- fn: The function to execute.
-
- Returns:
-      A list of values with the result of executing the function (one per
- trace).
- """
- return list(self.query_executor.map(fn, self.tps))
-
- def execute_and_flatten(self, fn: Callable[[TraceProcessor], pd.DataFrame]
- ) -> pd.DataFrame:
- """Executes the provided function and flattens the result.
-
- The execution happens in parallel across all the trace processor
- instances owned by this object and the returned Pandas dataframes are
- flattened into a single dataframe.
-
- Args:
- fn: The function to execute which returns a Pandas dataframe.
-
- Returns:
-      A Pandas dataframe containing the result of executing the function
-      across all the traces. Extra columns containing the trace args will
-      be added to the dataframe (see |query_and_flatten| for details).
- """
-
-    def wrapped(pair: Tuple[TraceProcessor, Dict[str, str]]):
- (tp, args) = pair
- df = fn(tp)
- for key, value in args.items():
- df[key] = value
- return df
-
- df = pd.concat(
- list(self.query_executor.map(wrapped, zip(self.tps, self.args))))
- return df.reset_index(drop=True)
-
- def close(self):
- """Closes this batch trace processor instance.
-
- This closes all spawned trace processor instances, releasing all the memory
- and resources those instances take.
-
- No further calls to other methods in this class should be made after
- calling this method.
- """
- if self.closed:
- return
- self.closed = True
-
- if self.tps:
- for tp in self.tps:
- tp.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, a, b, c):
- del a, b, c # Unused.
- self.close()
- return False
-
- def __del__(self):
- self.close()
diff --git a/tools/gen_binary_descriptors b/tools/gen_binary_descriptors
index 5388c48..dbbf8c7 100755
--- a/tools/gen_binary_descriptors
+++ b/tools/gen_binary_descriptors
@@ -26,10 +26,10 @@
SOURCE_TARGET = [
('protos/perfetto/trace_processor/trace_processor.proto',
- 'src/trace_processor/python/perfetto/trace_processor/trace_processor.descriptor'
+ 'python/perfetto/trace_processor/trace_processor.descriptor'
),
('protos/perfetto/metrics/metrics.proto',
- 'src/trace_processor/python/perfetto/trace_processor/metrics.descriptor'),
+ 'python/perfetto/trace_processor/metrics.descriptor'),
]
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
diff --git a/tools/run_python_api_tests.py b/tools/run_python_api_tests.py
deleted file mode 100755
index 78a78a1..0000000
--- a/tools/run_python_api_tests.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import os
-import sys
-import unittest
-
-ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-
-def main():
- try:
- import numpy
- import pandas
- except ModuleNotFoundError:
- print('Cannot proceed. Please `pip3 install pandas numpy`', file=sys.stderr)
- return 1
-
- # Append test and src paths so that all imports are loaded in correctly
- sys.path.append(os.path.join(ROOT_DIR, 'test', 'trace_processor', 'python'))
- sys.path.append(
- os.path.join(ROOT_DIR, 'src', 'trace_processor', 'python', 'perfetto'))
- import api_unittest
- import api_integrationtest
-
- # Set paths to trace_processor_shell and root directory as environment
- # variables
- parser = argparse.ArgumentParser()
- parser.add_argument("shell", type=str)
- os.environ["SHELL_PATH"] = parser.parse_args().shell
- os.environ["ROOT_DIR"] = ROOT_DIR
-
- # Initialise test suite
- loader = unittest.TestLoader()
- suite = unittest.TestSuite()
-
- # Add all relevant tests to test suite
- suite.addTests(loader.loadTestsFromModule(api_unittest))
- suite.addTests(loader.loadTestsFromModule(api_integrationtest))
-
- # Initialise runner to run all tests in suite
- runner = unittest.TextTestRunner(verbosity=3)
- result = runner.run(suite)
-
- return 0 if result.wasSuccessful() else 1
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/tools/slice_breakdown/main.py b/tools/slice_breakdown/main.py
deleted file mode 100644
index f6a4c4c..0000000
--- a/tools/slice_breakdown/main.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Given a trace file, gives the self-time of userspace slices broken
-down by process, thread and thread state.
-"""
-
-import argparse
-import sys
-
-from perfetto.slice_breakdown import compute_breakdown
-from perfetto.slice_breakdown import compute_breakdown_for_startup
-from perfetto.trace_processor import TraceProcessor
-from perfetto.trace_processor import TraceProcessorConfig
-
-
-def compute_breakdown_wrapper(args):
- config = TraceProcessorConfig(bin_path=args.shell_path, verbose=args.verbose)
- with TraceProcessor(trace=args.file, config=config) as tp:
- if args.startup_bounds:
- breakdown = compute_breakdown_for_startup(tp, args.startup_package,
- args.process_name)
- else:
- breakdown = compute_breakdown(tp, args.start_ts, args.end_ts,
- args.process_name)
- return breakdown
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--file', required=True)
- parser.add_argument('--shell-path', default=None)
- parser.add_argument('--start-ts', default=None)
- parser.add_argument('--end-ts', default=None)
- parser.add_argument('--startup-bounds', action='store_true', default=False)
- parser.add_argument('--startup-package', default=None)
- parser.add_argument('--process-name', default=None)
- parser.add_argument('--verbose', action='store_true', default=False)
- parser.add_argument('--out-csv', required=True)
- args = parser.parse_args()
-
- if (args.start_ts or args.end_ts) and args.startup_bounds:
- print("Cannot specify --start-ts or --end-ts and --startup-bounds")
- return 1
-
- if args.startup_package and not args.startup_bounds:
- print("Must specify --startup-bounds if --startup-package is specified")
- return 1
-
- breakdown = compute_breakdown_wrapper(args)
-
- if args.out_csv:
- diff_csv = breakdown.to_csv(index=False)
- if args.out_csv == '-':
- sys.stdout.write(diff_csv)
- else:
- with open(args.out_csv, 'w') as out:
- out.write(diff_csv)
-
- return 0
-
-
-if __name__ == '__main__':
- exit(main())
diff --git a/tools/slice_breakdown/perfetto/slice_breakdown/__init__.py b/tools/slice_breakdown/perfetto/slice_breakdown/__init__.py
deleted file mode 100644
index 6e6169f..0000000
--- a/tools/slice_breakdown/perfetto/slice_breakdown/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .breakdown import compute_breakdown, compute_breakdown_for_startup
\ No newline at end of file
diff --git a/tools/slice_breakdown/perfetto/slice_breakdown/breakdown.py b/tools/slice_breakdown/perfetto/slice_breakdown/breakdown.py
deleted file mode 100644
index 85b21d2..0000000
--- a/tools/slice_breakdown/perfetto/slice_breakdown/breakdown.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-def compute_breakdown(tp, start_ts=None, end_ts=None, process_name=None):
- """For each userspace slice in the trace processor instance |tp|, computes
- the self-time of that slice grouping by process name, thread name
- and thread state.
-
- Args:
- tp: the trace processor instance to query.
- start_ts: optional bound to only consider slices after this ts
- end_ts: optional bound to only consider slices until this ts
- process_name: optional process name to filter for slices; specifying
- this argument can make computing the breakdown a lot faster.
-
- Returns:
- A Pandas dataframe containing the total self time taken by a slice stack
- broken down by process name, thread name and thread state.
- """
- bounds = tp.query('SELECT * FROM trace_bounds').as_pandas_dataframe()
- start_ts = start_ts if start_ts else bounds['start_ts'][0]
- end_ts = end_ts if end_ts else bounds['end_ts'][0]
-
- tp.query("""
- DROP VIEW IF EXISTS modded_names
- """)
-
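-  # Normalize slice names which embed arguments (frame ids, file paths etc.)
-  # so that equivalent slices group together in the breakdown.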
- tp.query("""
- CREATE VIEW modded_names AS
- SELECT
- slice.id,
- slice.depth,
- slice.stack_id,
- CASE
- WHEN slice.name LIKE 'Choreographer#doFrame%'
- THEN 'Choreographer#doFrame'
- WHEN slice.name LIKE 'DrawFrames%'
- THEN 'DrawFrames'
- WHEN slice.name LIKE '/data/app%.apk'
- THEN 'APK load'
- WHEN slice.name LIKE 'OpenDexFilesFromOat%'
- THEN 'OpenDexFilesFromOat'
- WHEN slice.name LIKE 'Open oat file%'
- THEN 'Open oat file'
- ELSE slice.name
- END AS modded_name
- FROM slice
- """)
-
- tp.query("""
- DROP VIEW IF EXISTS thread_slice_stack
- """)
-
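-  # Flatten the slice hierarchy into non-overlapping spans, naming each span
-  # with the ' > '-joined names of its ancestor slices.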
- tp.query("""
- CREATE VIEW thread_slice_stack AS
- SELECT
- efs.ts,
- efs.dur,
- IFNULL(n.stack_id, -1) AS stack_id,
- t.utid,
- IIF(efs.source_id IS NULL, '[No slice]', IFNULL(
- (
- SELECT GROUP_CONCAT(modded_name, ' > ')
- FROM (
- SELECT p.modded_name
- FROM ancestor_slice(efs.source_id) a
- JOIN modded_names p ON a.id = p.id
- ORDER BY p.depth
- )
- ) || ' > ' || n.modded_name,
- n.modded_name
- )) AS stack_name
- FROM experimental_flat_slice({}, {}) efs
- LEFT JOIN modded_names n ON efs.source_id = n.id
- JOIN thread_track t ON t.id = efs.track_id
- """.format(start_ts, end_ts))
-
- tp.query("""
- DROP TABLE IF EXISTS thread_slice_stack_with_state
- """)
-
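-  # Span-join the flat slice stacks with thread_state so every span is
-  # subdivided by the scheduling state it ran in.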
- tp.query("""
- CREATE VIRTUAL TABLE thread_slice_stack_with_state
- USING SPAN_JOIN(
- thread_slice_stack PARTITIONED utid,
- thread_state PARTITIONED utid
- )
- """)
-
- if process_name:
- where_process = "AND process.name = '{}'".format(process_name)
- else:
- where_process = ''
-
- breakdown = tp.query("""
- SELECT
- process.name AS process_name,
- thread.name AS thread_name,
- CASE
- WHEN slice.state = 'D' and slice.io_wait
- THEN 'Uninterruptible sleep (IO)'
- WHEN slice.state = 'DK' and slice.io_wait
- THEN 'Uninterruptible sleep + Wake-kill (IO)'
- WHEN slice.state = 'D' and not slice.io_wait
- THEN 'Uninterruptible sleep (non-IO)'
- WHEN slice.state = 'DK' and not slice.io_wait
- THEN 'Uninterruptible sleep + Wake-kill (non-IO)'
- WHEN slice.state = 'D'
- THEN 'Uninterruptible sleep'
- WHEN slice.state = 'DK'
- THEN 'Uninterruptible sleep + Wake-kill'
- WHEN slice.state = 'S' THEN 'Interruptible sleep'
- WHEN slice.state = 'R' THEN 'Runnable'
- WHEN slice.state = 'R+' THEN 'Runnable (Preempted)'
- ELSE slice.state
- END AS state,
- slice.stack_name,
- SUM(slice.dur)/1e6 AS dur_sum,
- MIN(slice.dur/1e6) AS dur_min,
- MAX(slice.dur/1e6) AS dur_max,
- AVG(slice.dur/1e6) AS dur_mean,
- PERCENTILE(slice.dur/1e6, 50) AS dur_median,
- PERCENTILE(slice.dur/1e6, 25) AS dur_25_percentile,
- PERCENTILE(slice.dur/1e6, 75) AS dur_75_percentile,
- PERCENTILE(slice.dur/1e6, 95) AS dur_95_percentile,
- PERCENTILE(slice.dur/1e6, 99) AS dur_99_percentile,
- COUNT(1) as count
- FROM process
- JOIN thread USING (upid)
- JOIN thread_slice_stack_with_state slice USING (utid)
- WHERE dur != -1 {}
- GROUP BY thread.name, stack_id, state
- ORDER BY dur_sum DESC
- """.format(where_process)).as_pandas_dataframe()
-
- return breakdown
-
-
-def compute_breakdown_for_startup(tp, package_name=None, process_name=None):
- """Computes the slice breakdown (like |compute_breakdown|) but only
-considering slices which happened during an app startup.
-
- Args:
- tp: the trace processor instance to query.
- package_name: optional package name to filter for startups. Only a single
- startup matching this package name should be present. If not specified,
- only a single startup of any app should be in the trace.
- process_name: optional process name to filter for slices; specifying
- this argument can make computing the breakdown a lot faster.
-
- Returns:
- The same as |compute_breakdown| but only containing slices which happened
- during app startup.
- """
- tp.metric(['android_startup'])
-
- # Verify there was only one startup in the trace matching the package
- # name.
- filter = "WHERE package = '{}'".format(package_name) if package_name else ''
- launches = tp.query('''
- SELECT ts, ts_end, dur
- FROM launches
- {}
- '''.format(filter)).as_pandas_dataframe()
- if len(launches) == 0:
- raise Exception("Didn't find startup in trace")
- if len(launches) > 1:
- raise Exception("Found multiple startups in trace")
-
- start = launches['ts'][0]
- end = launches['ts_end'][0]
-
- return compute_breakdown(tp, start, end, process_name)