Merge "tp: CREATE OR REPLACE Perfetto index" into main
diff --git a/Android.bp b/Android.bp
index de5c166..ed06855 100644
--- a/Android.bp
+++ b/Android.bp
@@ -12914,6 +12914,7 @@
"src/trace_processor/perfetto_sql/intrinsics/functions/create_function.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/create_view_function.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/dominator_tree.cc",
+ "src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/graph_traversal.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/import.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/layout_functions.cc",
@@ -13113,10 +13114,13 @@
"src/trace_processor/perfetto_sql/stdlib/android/frames/timeline_maxsdk28.sql",
"src/trace_processor/perfetto_sql/stdlib/android/freezer.sql",
"src/trace_processor/perfetto_sql/stdlib/android/garbage_collection.sql",
+ "src/trace_processor/perfetto_sql/stdlib/android/gpu/frequency.sql",
+ "src/trace_processor/perfetto_sql/stdlib/android/gpu/memory.sql",
"src/trace_processor/perfetto_sql/stdlib/android/input.sql",
"src/trace_processor/perfetto_sql/stdlib/android/io.sql",
"src/trace_processor/perfetto_sql/stdlib/android/job_scheduler.sql",
"src/trace_processor/perfetto_sql/stdlib/android/memory/heap_graph/dominator_tree.sql",
+ "src/trace_processor/perfetto_sql/stdlib/android/memory/process.sql",
"src/trace_processor/perfetto_sql/stdlib/android/monitor_contention.sql",
"src/trace_processor/perfetto_sql/stdlib/android/network_packets.sql",
"src/trace_processor/perfetto_sql/stdlib/android/oom_adjuster.sql",
@@ -13151,9 +13155,9 @@
"src/trace_processor/perfetto_sql/stdlib/deprecated/v42/common/slices.sql",
"src/trace_processor/perfetto_sql/stdlib/deprecated/v42/common/timestamps.sql",
"src/trace_processor/perfetto_sql/stdlib/export/to_firefox_profile.sql",
- "src/trace_processor/perfetto_sql/stdlib/gpu/frequency.sql",
"src/trace_processor/perfetto_sql/stdlib/graphs/dominator_tree.sql",
"src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql",
+ "src/trace_processor/perfetto_sql/stdlib/graphs/scan.sql",
"src/trace_processor/perfetto_sql/stdlib/graphs/search.sql",
"src/trace_processor/perfetto_sql/stdlib/intervals/intersect.sql",
"src/trace_processor/perfetto_sql/stdlib/intervals/overlap.sql",
@@ -13163,10 +13167,9 @@
"src/trace_processor/perfetto_sql/stdlib/linux/cpu/utilization/process.sql",
"src/trace_processor/perfetto_sql/stdlib/linux/cpu/utilization/system.sql",
"src/trace_processor/perfetto_sql/stdlib/linux/cpu/utilization/thread.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/android/gpu.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/general.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/high_watermark.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/process.sql",
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/general.sql",
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/high_watermark.sql",
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/process.sql",
"src/trace_processor/perfetto_sql/stdlib/pkvm/hypervisor.sql",
"src/trace_processor/perfetto_sql/stdlib/prelude/casts.sql",
"src/trace_processor/perfetto_sql/stdlib/prelude/slices.sql",
diff --git a/BUILD b/BUILD
index 6608e3d..a1c29e2 100644
--- a/BUILD
+++ b/BUILD
@@ -2346,6 +2346,8 @@
"src/trace_processor/perfetto_sql/intrinsics/functions/create_view_function.h",
"src/trace_processor/perfetto_sql/intrinsics/functions/dominator_tree.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/dominator_tree.h",
+ "src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.cc",
+ "src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h",
"src/trace_processor/perfetto_sql/intrinsics/functions/graph_traversal.cc",
"src/trace_processor/perfetto_sql/intrinsics/functions/graph_traversal.h",
"src/trace_processor/perfetto_sql/intrinsics/functions/import.cc",
@@ -2515,6 +2517,15 @@
],
)
+# GN target: //src/trace_processor/perfetto_sql/stdlib/android/gpu:gpu
+perfetto_filegroup(
+ name = "src_trace_processor_perfetto_sql_stdlib_android_gpu_gpu",
+ srcs = [
+ "src/trace_processor/perfetto_sql/stdlib/android/gpu/frequency.sql",
+ "src/trace_processor/perfetto_sql/stdlib/android/gpu/memory.sql",
+ ],
+)
+
# GN target: //src/trace_processor/perfetto_sql/stdlib/android/memory/heap_graph:heap_graph
perfetto_filegroup(
name = "src_trace_processor_perfetto_sql_stdlib_android_memory_heap_graph_heap_graph",
@@ -2526,6 +2537,9 @@
# GN target: //src/trace_processor/perfetto_sql/stdlib/android/memory:memory
perfetto_filegroup(
name = "src_trace_processor_perfetto_sql_stdlib_android_memory_memory",
+ srcs = [
+ "src/trace_processor/perfetto_sql/stdlib/android/memory/process.sql",
+ ],
)
# GN target: //src/trace_processor/perfetto_sql/stdlib/android/startup:startup
@@ -2630,20 +2644,13 @@
],
)
-# GN target: //src/trace_processor/perfetto_sql/stdlib/gpu:gpu
-perfetto_filegroup(
- name = "src_trace_processor_perfetto_sql_stdlib_gpu_gpu",
- srcs = [
- "src/trace_processor/perfetto_sql/stdlib/gpu/frequency.sql",
- ],
-)
-
# GN target: //src/trace_processor/perfetto_sql/stdlib/graphs:graphs
perfetto_filegroup(
name = "src_trace_processor_perfetto_sql_stdlib_graphs_graphs",
srcs = [
"src/trace_processor/perfetto_sql/stdlib/graphs/dominator_tree.sql",
"src/trace_processor/perfetto_sql/stdlib/graphs/partition.sql",
+ "src/trace_processor/perfetto_sql/stdlib/graphs/scan.sql",
"src/trace_processor/perfetto_sql/stdlib/graphs/search.sql",
],
)
@@ -2677,34 +2684,21 @@
],
)
+# GN target: //src/trace_processor/perfetto_sql/stdlib/linux/memory:memory
+perfetto_filegroup(
+ name = "src_trace_processor_perfetto_sql_stdlib_linux_memory_memory",
+ srcs = [
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/general.sql",
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/high_watermark.sql",
+ "src/trace_processor/perfetto_sql/stdlib/linux/memory/process.sql",
+ ],
+)
+
# GN target: //src/trace_processor/perfetto_sql/stdlib/linux:linux
perfetto_filegroup(
name = "src_trace_processor_perfetto_sql_stdlib_linux_linux",
)
-# GN target: //src/trace_processor/perfetto_sql/stdlib/memory/android:android
-perfetto_filegroup(
- name = "src_trace_processor_perfetto_sql_stdlib_memory_android_android",
- srcs = [
- "src/trace_processor/perfetto_sql/stdlib/memory/android/gpu.sql",
- ],
-)
-
-# GN target: //src/trace_processor/perfetto_sql/stdlib/memory/linux:linux
-perfetto_filegroup(
- name = "src_trace_processor_perfetto_sql_stdlib_memory_linux_linux",
- srcs = [
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/general.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/high_watermark.sql",
- "src/trace_processor/perfetto_sql/stdlib/memory/linux/process.sql",
- ],
-)
-
-# GN target: //src/trace_processor/perfetto_sql/stdlib/memory:memory
-perfetto_filegroup(
- name = "src_trace_processor_perfetto_sql_stdlib_memory_memory",
-)
-
# GN target: //src/trace_processor/perfetto_sql/stdlib/pkvm:pkvm
perfetto_filegroup(
name = "src_trace_processor_perfetto_sql_stdlib_pkvm_pkvm",
@@ -2817,6 +2811,7 @@
":src_trace_processor_perfetto_sql_stdlib_android_android",
":src_trace_processor_perfetto_sql_stdlib_android_auto_auto",
":src_trace_processor_perfetto_sql_stdlib_android_frames_frames",
+ ":src_trace_processor_perfetto_sql_stdlib_android_gpu_gpu",
":src_trace_processor_perfetto_sql_stdlib_android_memory_heap_graph_heap_graph",
":src_trace_processor_perfetto_sql_stdlib_android_memory_memory",
":src_trace_processor_perfetto_sql_stdlib_android_startup_startup",
@@ -2826,15 +2821,12 @@
":src_trace_processor_perfetto_sql_stdlib_counters_counters",
":src_trace_processor_perfetto_sql_stdlib_deprecated_v42_common_common",
":src_trace_processor_perfetto_sql_stdlib_export_export",
- ":src_trace_processor_perfetto_sql_stdlib_gpu_gpu",
":src_trace_processor_perfetto_sql_stdlib_graphs_graphs",
":src_trace_processor_perfetto_sql_stdlib_intervals_intervals",
":src_trace_processor_perfetto_sql_stdlib_linux_cpu_cpu",
":src_trace_processor_perfetto_sql_stdlib_linux_cpu_utilization_utilization",
":src_trace_processor_perfetto_sql_stdlib_linux_linux",
- ":src_trace_processor_perfetto_sql_stdlib_memory_android_android",
- ":src_trace_processor_perfetto_sql_stdlib_memory_linux_linux",
- ":src_trace_processor_perfetto_sql_stdlib_memory_memory",
+ ":src_trace_processor_perfetto_sql_stdlib_linux_memory_memory",
":src_trace_processor_perfetto_sql_stdlib_pkvm_pkvm",
":src_trace_processor_perfetto_sql_stdlib_prelude_prelude",
":src_trace_processor_perfetto_sql_stdlib_sched_sched",
@@ -2896,9 +2888,12 @@
name = "src_trace_processor_sqlite_bindings_bindings",
srcs = [
"src/trace_processor/sqlite/bindings/sqlite_aggregate_function.h",
+ "src/trace_processor/sqlite/bindings/sqlite_bind.h",
+ "src/trace_processor/sqlite/bindings/sqlite_column.h",
"src/trace_processor/sqlite/bindings/sqlite_function.h",
"src/trace_processor/sqlite/bindings/sqlite_module.h",
"src/trace_processor/sqlite/bindings/sqlite_result.h",
+ "src/trace_processor/sqlite/bindings/sqlite_stmt.h",
"src/trace_processor/sqlite/bindings/sqlite_type.h",
"src/trace_processor/sqlite/bindings/sqlite_value.h",
"src/trace_processor/sqlite/bindings/sqlite_window_function.h",
diff --git a/CHANGELOG b/CHANGELOG
index cc973ac..10f494f 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -12,12 +12,20 @@
* Moved `cpu.freq` module to `linux.cpu.frequency` and renamed
`cpu_freq_counters` to `cpu_frequency_counters` for the same
reason as above.
+ * Moved `gpu.frequency` to `android.gpu.frequency` for the same reason as
+ above.
* Moved `cpu.idle` module to `linux.cpu.idle` for the same
reason as above.
- * Moved `linux.cpu_idle` to `linux.cpu.idle` to make it
+ * Moved content of `linux.cpu_idle` into `linux.cpu.idle` to make it
consistent with above changes.
+ * Moved `memory.android.gpu` to `android.memory.gpu` to make it consistent
+ with above changes.
+ * Moved contents of `memory.linux.process` to `linux.memory.process` and
+ `android.memory.process` to make it consistent with above changes.
+ * Moved `memory.linux.high_watermark` to `linux.memory.high_watermark` to
+ make it consistent with above changes.
* Moved `memory.heap_graph_dominator_tree` to
- `android.memory.heap_graph.dominator_tree`. This is to prepare for the
+ `android.memory.heap_graph.dominator_tree`. This is to allow for the
addition of more modules related to heap graphs.
Trace Processor:
*
diff --git a/infra/ci/config.py b/infra/ci/config.py
index bde2077..891d739 100755
--- a/infra/ci/config.py
+++ b/infra/ci/config.py
@@ -70,46 +70,58 @@
'non_hermetic_clang_stdlib="libc++" '
'enable_perfetto_merged_protos_check=true',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-clang-x86_64-tsan': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_tsan=true',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-clang-x86_64-msan': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_msan=true',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-clang-x86_64-asan_lsan': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_asan=true is_lsan=true',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-clang-x86-asan_lsan': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_asan=true is_lsan=true '
'target_cpu="x86"',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-gcc7-x86_64-release': {
- 'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_clang=false '
- 'cc="gcc-7" cxx="g++-7"',
+ 'PERFETTO_TEST_GN_ARGS':
+ 'is_debug=false is_clang=false enable_perfetto_grpc=true '
+ 'cc="gcc-7" cxx="g++-7"',
'PERFETTO_TEST_SCRIPT': 'test/ci/linux_tests.sh',
+ 'INSTALL_BUILD_DEPS': '--grpc',
},
'android-clang-arm-release': {
'PERFETTO_TEST_GN_ARGS':
'is_debug=false target_os="android" target_cpu="arm"',
'PERFETTO_TEST_SCRIPT':
'test/ci/android_tests.sh',
+ 'INSTALL_BUILD_DEPS':
+ '',
},
'linux-clang-x86_64-libfuzzer': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false is_fuzzer=true is_asan=true',
'PERFETTO_TEST_SCRIPT': 'test/ci/fuzzer_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'linux-clang-x86_64-bazel': {
'PERFETTO_TEST_GN_ARGS': '',
'PERFETTO_TEST_SCRIPT': 'test/ci/bazel_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
'ui-clang-x86_64-release': {
'PERFETTO_TEST_GN_ARGS': 'is_debug=false',
'PERFETTO_TEST_SCRIPT': 'test/ci/ui_tests.sh',
+ 'INSTALL_BUILD_DEPS': '',
},
}
diff --git a/python/generators/sql_processing/utils.py b/python/generators/sql_processing/utils.py
index 1bd5f3e..567e888 100644
--- a/python/generators/sql_processing/utils.py
+++ b/python/generators/sql_processing/utils.py
@@ -108,7 +108,7 @@
}
ALLOWED_PREFIXES = {
- 'android': ['heap_graph'],
+ 'android': ['heap_graph', 'memory'],
'counters': ['counter'],
'chrome/util': ['cr'],
'intervals': ['interval'],
diff --git a/python/perfetto/bigtrace/api.py b/python/perfetto/bigtrace/api.py
index ca3abeb..f38102b 100644
--- a/python/perfetto/bigtrace/api.py
+++ b/python/perfetto/bigtrace/api.py
@@ -21,7 +21,7 @@
from perfetto.bigtrace.protos.perfetto.bigtrace.orchestrator_pb2 import BigtraceQueryArgs
from perfetto.bigtrace.protos.perfetto.bigtrace.orchestrator_pb2_grpc import BigtraceOrchestratorStub
from perfetto.common.query_result_iterator import QueryResultIterator
-
+from perfetto.common.exceptions import PerfettoException
class Bigtrace:
@@ -31,23 +31,28 @@
def query(self, traces: List[str], sql_query: str):
if not traces:
- raise Exception("Trace list cannot be empty")
+ raise PerfettoException("Trace list cannot be empty")
if not sql_query:
- raise Exception("SQL query cannot be empty")
+ raise PerfettoException("SQL query cannot be empty")
# Query and then convert to pandas
tables = []
args = BigtraceQueryArgs(traces=traces, sql_query=sql_query)
- for response in self.stub.Query(args):
- repeated_batches = []
- results = response.result
- column_names = results[0].column_names
- for result in results:
- repeated_batches.extend(result.batch)
- iterator = QueryResultIterator(column_names, repeated_batches)
- df = iterator.as_pandas_dataframe()
- # TODO(ivankc) Investigate whether this is the
- # best place to insert these addresses for performance
- df.insert(0, '_trace_address', response.trace)
- tables.append(df)
- flattened = pd.concat(tables)
- return flattened.reset_index(drop=True)
+
+ responses = self.stub.Query(args)
+ try:
+ for response in responses:
+ repeated_batches = []
+ results = response.result
+ column_names = results[0].column_names
+ for result in results:
+ repeated_batches.extend(result.batch)
+ iterator = QueryResultIterator(column_names, repeated_batches)
+ df = iterator.as_pandas_dataframe()
+ # TODO(ivankc) Investigate whether this is the
+ # best place to insert these addresses for performance
+ df.insert(0, '_trace_address', response.trace)
+ tables.append(df)
+ flattened = pd.concat(tables)
+ return flattened.reset_index(drop=True)
+ except grpc.RpcError as e:
+ raise PerfettoException(f"gRPC {e.code().name} error - {e.details()}")
diff --git a/python/perfetto/common/exceptions.py b/python/perfetto/common/exceptions.py
index bddb9c5..b01dea7 100644
--- a/python/perfetto/common/exceptions.py
+++ b/python/perfetto/common/exceptions.py
@@ -16,4 +16,4 @@
class PerfettoException(Exception):
def __init__(self, message):
- super().__init__(message)
+ super().__init__(message)
\ No newline at end of file
diff --git a/python/run_tests.py b/python/run_tests.py
index c6e344a..55c2380 100755
--- a/python/run_tests.py
+++ b/python/run_tests.py
@@ -18,8 +18,9 @@
import sys
import unittest
-from test import query_result_iterator_unittest
from test import api_integrationtest
+# from test import bigtrace_api_integrationtest
+from test import query_result_iterator_unittest
from test import resolver_unittest
from test import stdlib_unittest
@@ -38,8 +39,11 @@
# Set paths to trace_processor_shell and root directory as environment
# variables
parser = argparse.ArgumentParser()
- parser.add_argument("shell", type=str)
- os.environ["SHELL_PATH"] = parser.parse_args().shell
+ parser.add_argument("host_out_path", type=str)
+ host_out_path = parser.parse_args().host_out_path
+ os.environ["SHELL_PATH"] = f"{host_out_path}/trace_processor_shell"
+ os.environ["ORCHESTRATOR_PATH"] = f"{host_out_path}/orchestrator_main"
+ os.environ["WORKER_PATH"] = f"{host_out_path}/worker_main"
os.environ["ROOT_DIR"] = ROOT_DIR
loader = unittest.TestLoader()
@@ -50,6 +54,8 @@
suite.addTests(loader.loadTestsFromModule(resolver_unittest))
suite.addTests(loader.loadTestsFromModule(api_integrationtest))
suite.addTests(loader.loadTestsFromModule(stdlib_unittest))
+ # TODO(ivankc) Uncomment this out when controller rolls out
+ # suite.addTests(loader.loadTestsFromModule(bigtrace_api_integrationtest))
# Initialise runner to run all tests in suite
runner = unittest.TextTestRunner(verbosity=3)
diff --git a/python/test/bigtrace_api_integrationtest.py b/python/test/bigtrace_api_integrationtest.py
new file mode 100644
index 0000000..edb9347
--- /dev/null
+++ b/python/test/bigtrace_api_integrationtest.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import subprocess
+import perfetto.bigtrace.api
+import os
+
+from perfetto.common.exceptions import PerfettoException
+
+
+class BigtraceTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(self):
+ self.root_dir = os.environ["ROOT_DIR"]
+ self.worker = subprocess.Popen(os.environ["WORKER_PATH"])
+ self.orchestrator = subprocess.Popen(os.environ["ORCHESTRATOR_PATH"])
+ self.client = perfetto.bigtrace.api.Bigtrace()
+
+ @classmethod
+ def tearDownClass(self):
+ self.worker.kill()
+ self.orchestrator.kill()
+ del self.client
+
+ def test_valid_traces(self):
+ result = self.client.query([
+ f"{self.root_dir}/test/data/api24_startup_cold.perfetto-trace",
+ f"{self.root_dir}/test/data/api24_startup_hot.perfetto-trace"
+ ], "SELECT count(1) as count FROM slice LIMIT 5")
+
+ self.assertEqual(result['count'][0], 9726)
+ self.assertEqual(result['count'][1], 5726)
+
+ def test_empty_traces(self):
+ with self.assertRaises(PerfettoException):
+ result = self.client.query([], "SELECT count(1) FROM slice LIMIT 5")
+
+ def test_empty_sql_string(self):
+ with self.assertRaises(PerfettoException):
+ result = self.client.query([
+ f"{self.root_dir}/test/data/api24_startup_cold.perfetto-trace",
+ f"{self.root_dir}/test/data/api24_startup_hot.perfetto-trace"
+ ], "")
+
+ def test_message_limit_exceeded(self):
+ with self.assertRaises(PerfettoException):
+ result = self.client.query(
+ [f"{self.root_dir}/test/data/long_task_tracking_trace"],
+ "SELECT * FROM slice")
diff --git a/python/tools/check_ratchet.py b/python/tools/check_ratchet.py
index e8240d3..4054679 100755
--- a/python/tools/check_ratchet.py
+++ b/python/tools/check_ratchet.py
@@ -36,7 +36,7 @@
from dataclasses import dataclass
-EXPECTED_ANY_COUNT = 73
+EXPECTED_ANY_COUNT = 72
EXPECTED_RUN_METRIC_COUNT = 5
ROOT_DIR = os.path.dirname(
diff --git a/src/trace_processor/importers/ftrace/ftrace_parser.cc b/src/trace_processor/importers/ftrace/ftrace_parser.cc
index 21a199a..40d1730 100644
--- a/src/trace_processor/importers/ftrace/ftrace_parser.cc
+++ b/src/trace_processor/importers/ftrace/ftrace_parser.cc
@@ -339,6 +339,7 @@
dma_heap_change_id_(
context->storage->InternString("mem.dma_heap_change")),
dma_buffer_id_(context->storage->InternString("mem.dma_buffer")),
+ inode_arg_id_(context->storage->InternString("inode")),
ion_total_unknown_id_(context->storage->InternString("mem.ion.unknown")),
ion_change_unknown_id_(
context->storage->InternString("mem.ion_change.unknown")),
@@ -1957,8 +1958,13 @@
UniqueTid utid = context_->process_tracker->GetOrCreateThread(pid);
track = context_->track_tracker->InternThreadCounterTrack(dma_heap_change_id_,
utid);
- context_->event_tracker->PushCounter(
+
+ auto opt_counter_id = context_->event_tracker->PushCounter(
timestamp, static_cast<double>(dma_heap.len()), track);
+ if (opt_counter_id) {
+ context_->args_tracker->AddArgsTo(*opt_counter_id)
+ .AddArg(inode_arg_id_, Variadic::UnsignedInteger(dma_heap.inode()));
+ }
// Global track for individual buffer tracking
auto async_track =
diff --git a/src/trace_processor/importers/ftrace/ftrace_parser.h b/src/trace_processor/importers/ftrace/ftrace_parser.h
index 97aa089..ad121da 100644
--- a/src/trace_processor/importers/ftrace/ftrace_parser.h
+++ b/src/trace_processor/importers/ftrace/ftrace_parser.h
@@ -335,6 +335,7 @@
const StringId dma_heap_total_id_;
const StringId dma_heap_change_id_;
const StringId dma_buffer_id_;
+ const StringId inode_arg_id_;
const StringId ion_total_unknown_id_;
const StringId ion_change_unknown_id_;
const StringId bcl_irq_id_;
diff --git a/src/trace_processor/metrics/sql/android/process_mem.sql b/src/trace_processor/metrics/sql/android/process_mem.sql
index f3b1c88..4622462 100644
--- a/src/trace_processor/metrics/sql/android/process_mem.sql
+++ b/src/trace_processor/metrics/sql/android/process_mem.sql
@@ -14,7 +14,8 @@
-- limitations under the License.
--
-INCLUDE PERFETTO MODULE memory.linux.process;
+INCLUDE PERFETTO MODULE android.memory.process;
+INCLUDE PERFETTO MODULE linux.memory.process;
SELECT RUN_METRIC('android/process_oom_score.sql');
diff --git a/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.cc b/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.cc
index 851d6e5..0e89f32 100644
--- a/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.cc
+++ b/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.cc
@@ -208,6 +208,24 @@
}
}
+base::StatusOr<SqliteEngine::PreparedStatement>
+PerfettoSqlEngine::PrepareSqliteStatement(SqlSource sql_source) {
+ PerfettoSqlParser parser(std::move(sql_source), macros_);
+ if (!parser.Next()) {
+ return base::ErrStatus("No statement found to prepare");
+ }
+ auto* sqlite = std::get_if<PerfettoSqlParser::SqliteSql>(&parser.statement());
+ if (!sqlite) {
+ return base::ErrStatus("Statement was not a valid SQLite statement");
+ }
+ SqliteEngine::PreparedStatement stmt =
+ engine_->PrepareStatement(parser.statement_sql());
+ if (parser.Next()) {
+ return base::ErrStatus("Too many statements found to prepare");
+ }
+ return std::move(stmt);
+}
+
void PerfettoSqlEngine::RegisterStaticTable(Table* table,
const std::string& table_name,
Table::Schema schema) {
diff --git a/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h b/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h
index 8619e8c..2a266f9 100644
--- a/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h
+++ b/src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h
@@ -17,6 +17,7 @@
#ifndef SRC_TRACE_PROCESSOR_PERFETTO_SQL_ENGINE_PERFETTO_SQL_ENGINE_H_
#define SRC_TRACE_PROCESSOR_PERFETTO_SQL_ENGINE_PERFETTO_SQL_ENGINE_H_
+#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
@@ -83,6 +84,14 @@
// no valid SQL to run.
base::StatusOr<ExecutionResult> ExecuteUntilLastStatement(SqlSource sql);
+ // Prepares a single SQLite statement in |sql| and returns a
+ // |PreparedStatement| object.
+ //
+ // Returns an error if the preparation of the statement failed or if there was
+ // no valid SQL to run.
+ base::StatusOr<SqliteEngine::PreparedStatement> PrepareSqliteStatement(
+ SqlSource sql);
+
// Registers a trace processor C++ function to be runnable from SQL.
//
// The format of the function is given by the |SqlFunction|.
@@ -127,6 +136,10 @@
template <typename Function>
base::Status RegisterSqliteFunction(typename Function::UserDataContext* ctx,
bool deterministic = true);
+ template <typename Function>
+ base::Status RegisterSqliteFunction(
+ std::unique_ptr<typename Function::UserDataContext> ctx,
+ bool deterministic = true);
// Registers a trace processor C++ aggregate function to be runnable from SQL.
//
@@ -391,6 +404,20 @@
}
template <typename Function>
+base::Status PerfettoSqlEngine::RegisterSqliteFunction(
+ std::unique_ptr<typename Function::UserDataContext> ctx,
+ bool deterministic) {
+ static_function_count_++;
+ return engine_->RegisterFunction(
+ Function::kName, Function::kArgCount, Function::Step, ctx.release(),
+ [](void* ptr) {
+ std::unique_ptr<typename Function::UserDataContext>(
+ static_cast<typename Function::UserDataContext*>(ptr));
+ },
+ deterministic);
+}
+
+template <typename Function>
base::Status PerfettoSqlEngine::RegisterSqliteAggregateFunction(
typename Function::UserDataContext* ctx,
bool deterministic) {
diff --git a/src/trace_processor/perfetto_sql/intrinsics/functions/BUILD.gn b/src/trace_processor/perfetto_sql/intrinsics/functions/BUILD.gn
index 7f8dd9a..aa0018b 100644
--- a/src/trace_processor/perfetto_sql/intrinsics/functions/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/intrinsics/functions/BUILD.gn
@@ -28,6 +28,8 @@
"create_view_function.h",
"dominator_tree.cc",
"dominator_tree.h",
+ "graph_scan.cc",
+ "graph_scan.h",
"graph_traversal.cc",
"graph_traversal.h",
"import.cc",
diff --git a/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.cc b/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.cc
new file mode 100644
index 0000000..b8fe51a
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.cc
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h"
+
+#include <algorithm>
+#include <cinttypes>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "perfetto/base/logging.h"
+#include "perfetto/base/status.h"
+#include "perfetto/ext/base/status_or.h"
+#include "perfetto/ext/base/string_utils.h"
+#include "src/trace_processor/containers/string_pool.h"
+#include "src/trace_processor/db/runtime_table.h"
+#include "src/trace_processor/perfetto_sql/engine/function_util.h"
+#include "src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/types/array.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/types/node.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/types/row_dataframe.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/types/value.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_bind.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_column.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_function.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_result.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_stmt.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_type.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_value.h"
+#include "src/trace_processor/sqlite/sql_source.h"
+#include "src/trace_processor/sqlite/sqlite_engine.h"
+#include "src/trace_processor/sqlite/sqlite_utils.h"
+#include "src/trace_processor/util/status_macros.h"
+
+namespace perfetto::trace_processor {
+namespace {
+
+struct NodeState {
+ uint32_t depth = 0;
+ enum : uint8_t {
+ kUnvisited,
+ kWaitingForDescendants,
+ kDone,
+ } visit_state = kUnvisited;
+};
+
+struct DepthTable {
+ RuntimeTable::Builder builder;
+ uint32_t row_count = 0;
+};
+
+struct GraphScanner {
+ base::StatusOr<std::unique_ptr<RuntimeTable>> Run();
+ std::vector<uint32_t> InitializeStateFromMaxNode();
+ uint32_t DfsAndComputeMaxDepth(std::vector<uint32_t> stack);
+ base::Status PushDownStartingAggregates(RuntimeTable::Builder& res,
+ uint32_t& res_row_count);
+ base::StatusOr<SqliteEngine::PreparedStatement> PrepareStatement() const;
+ base::Status PushDownAggregates(SqliteEngine::PreparedStatement& agg_stmt,
+ uint32_t agg_col_count,
+ RuntimeTable::Builder& res,
+ uint32_t& res_row_count);
+
+ const std::vector<uint32_t>& GetEdges(uint32_t id) {
+ return id < graph.size() ? graph[id].outgoing_edges : empty_edges;
+ }
+
+ PerfettoSqlEngine* engine;
+ StringPool* pool;
+ const perfetto_sql::Graph& graph;
+ const perfetto_sql::RowDataframe& inits;
+ std::string_view reduce;
+ std::vector<uint32_t> empty_edges;
+
+ std::vector<NodeState> state;
+ std::vector<DepthTable> tables_per_depth;
+};
+
+std::vector<uint32_t> GraphScanner::InitializeStateFromMaxNode() {
+ std::vector<uint32_t> stack;
+ auto nodes_size = static_cast<uint32_t>(graph.size());
+ for (uint32_t i = 0; i < inits.size(); ++i) {
+ auto start_id = static_cast<uint32_t>(
+ std::get<int64_t>(inits.cells[i * inits.column_names.size()]));
+ nodes_size = std::max(nodes_size, static_cast<uint32_t>(start_id) + 1);
+ for (uint32_t dest : GetEdges(start_id)) {
+ stack.emplace_back(static_cast<uint32_t>(dest));
+ }
+ }
+ state = std::vector<NodeState>(nodes_size);
+ return stack;
+}
+
+uint32_t GraphScanner::DfsAndComputeMaxDepth(std::vector<uint32_t> stack) {
+ uint32_t max_depth = 0;
+ while (!stack.empty()) {
+ uint32_t source_id = stack.back();
+ NodeState& source = state[source_id];
+ switch (source.visit_state) {
+ case NodeState::kUnvisited:
+ source.visit_state = NodeState::kWaitingForDescendants;
+ for (uint32_t dest_id : GetEdges(source_id)) {
+ stack.push_back(dest_id);
+ }
+ break;
+ case NodeState::kWaitingForDescendants:
+ stack.pop_back();
+ source.visit_state = NodeState::kDone;
+ for (uint32_t dest_id : GetEdges(source_id)) {
+ PERFETTO_DCHECK(state[dest_id].visit_state == NodeState::kDone);
+ source.depth = std::max(state[dest_id].depth + 1, source.depth);
+ }
+ max_depth = std::max(max_depth, source.depth);
+ break;
+ case NodeState::kDone:
+ stack.pop_back();
+ break;
+ }
+ }
+ return max_depth;
+}
+
+base::Status GraphScanner::PushDownAggregates(
+ SqliteEngine::PreparedStatement& agg_stmt,
+ uint32_t agg_col_count,
+ RuntimeTable::Builder& res,
+ uint32_t& res_row_count) {
+ while (agg_stmt.Step()) {
+ auto id =
+ static_cast<uint32_t>(sqlite::column::Int64(agg_stmt.sqlite_stmt(), 0));
+ res_row_count++;
+ RETURN_IF_ERROR(res.AddInteger(0, id));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ dt.row_count++;
+ RETURN_IF_ERROR(dt.builder.AddInteger(0, outgoing));
+ }
+ for (uint32_t i = 1; i < agg_col_count; ++i) {
+ switch (sqlite::column::Type(agg_stmt.sqlite_stmt(), i)) {
+ case sqlite::Type::kNull:
+ RETURN_IF_ERROR(res.AddNull(i));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddNull(i));
+ }
+ break;
+ case sqlite::Type::kInteger: {
+ int64_t a = sqlite::column::Int64(agg_stmt.sqlite_stmt(), i);
+ RETURN_IF_ERROR(res.AddInteger(i, a));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddInteger(i, a));
+ }
+ break;
+ }
+ case sqlite::Type::kText: {
+ const char* a = sqlite::column::Text(agg_stmt.sqlite_stmt(), i);
+ RETURN_IF_ERROR(res.AddText(i, a));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddText(i, a));
+ }
+ break;
+ }
+ case sqlite::Type::kFloat: {
+ double a = sqlite::column::Double(agg_stmt.sqlite_stmt(), i);
+ RETURN_IF_ERROR(res.AddFloat(i, a));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddFloat(i, a));
+ }
+ break;
+ }
+ case sqlite::Type::kBlob:
+ return base::ErrStatus("Unsupported blob type");
+ }
+ }
+ }
+ return agg_stmt.status();
+}
+
+base::StatusOr<SqliteEngine::PreparedStatement> GraphScanner::PrepareStatement()
+ const {
+ std::vector<std::string> select_cols;
+ std::vector<std::string> bind_cols;
+ for (uint32_t i = 0; i < inits.column_names.size(); ++i) {
+ select_cols.emplace_back(
+ base::StackString<1024>("c%" PRIu32 " as %s", i,
+ inits.column_names[i].c_str())
+ .ToStdString());
+ bind_cols.emplace_back(base::StackString<1024>(
+ "__intrinsic_table_ptr_bind(c%" PRIu32 ", '%s')",
+ i, inits.column_names[i].c_str())
+ .ToStdString());
+ }
+
+ // TODO(lalitm): verify that the init aggregates line up correctly with the
+ // aggregation macro.
+ std::string raw_sql =
+ "(SELECT $cols FROM __intrinsic_table_ptr($var) WHERE $where)";
+ raw_sql = base::ReplaceAll(raw_sql, "$cols", base::Join(select_cols, ","));
+ raw_sql = base::ReplaceAll(raw_sql, "$where", base::Join(bind_cols, " AND "));
+ std::string res = base::ReplaceAll(std::string(reduce), "$table", raw_sql);
+ return engine->PrepareSqliteStatement(
+ SqlSource::FromTraceProcessorImplementation("SELECT * FROM " + res));
+}
+
+base::Status GraphScanner::PushDownStartingAggregates(
+ RuntimeTable::Builder& res,
+ uint32_t& res_row_count) {
+ for (uint32_t i = 0; i < inits.size(); ++i) {
+ const auto* cell = inits.cells.data() + i * inits.column_names.size();
+ auto id = static_cast<uint32_t>(std::get<int64_t>(*cell));
+ RETURN_IF_ERROR(res.AddInteger(0, id));
+ res_row_count++;
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ dt.row_count++;
+ RETURN_IF_ERROR(dt.builder.AddInteger(0, outgoing));
+ }
+ for (uint32_t j = 1; j < inits.column_names.size(); ++j) {
+ switch (cell[j].index()) {
+ case perfetto_sql::ValueIndex<std::monostate>():
+ RETURN_IF_ERROR(res.AddNull(j));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddNull(j));
+ }
+ break;
+ case perfetto_sql::ValueIndex<int64_t>(): {
+ int64_t r = std::get<int64_t>(cell[j]);
+ RETURN_IF_ERROR(res.AddInteger(j, r));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddInteger(j, r));
+ }
+ break;
+ }
+ case perfetto_sql::ValueIndex<double>(): {
+ double r = std::get<double>(cell[j]);
+ RETURN_IF_ERROR(res.AddFloat(j, r));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddFloat(j, r));
+ }
+ break;
+ }
+ case perfetto_sql::ValueIndex<std::string>(): {
+ const char* r = std::get<std::string>(cell[j]).c_str();
+ RETURN_IF_ERROR(res.AddText(j, r));
+ for (uint32_t outgoing : GetEdges(id)) {
+ auto& dt = tables_per_depth[state[outgoing].depth];
+ RETURN_IF_ERROR(dt.builder.AddText(j, r));
+ }
+ break;
+ }
+ default:
+ PERFETTO_FATAL("Invalid index");
+ }
+ }
+ }
+ return base::OkStatus();
+}
+
+base::StatusOr<std::unique_ptr<RuntimeTable>> GraphScanner::Run() {
+ if (!inits.id_column_index) {
+ return base::ErrStatus(
+ "GRAPH_SCAN: 'id' column is not present in initial nodes table");
+ }
+ if (inits.id_column_index != 0) {
+ return base::ErrStatus(
+ "GRAPH_SCAN: 'id' column must be the first column in the initial "
+ "nodes table");
+ }
+
+ // The basic idea of this algorithm is as follows:
+ // 1) Setup the state vector by figuring out the maximum id in the initial and
+ // graph tables.
+ // 2) Do a DFS to compute the depth of each node and figure out the max depth.
+ // 3) Setup all the table builders for each depth.
+ // 4) For all the starting nodes, push down their values to their dependents
+ // and also store the aggregates in the final result table.
+ // 5) Going from highest depth downward, run the aggregation SQL the user
+ // specified, push down those values to their dependents and also store the
+ // aggregates in the final result table.
+ // 6) Return the final result table.
+ //
+ // The complexity of this algorithm is O(n) in both memory and CPU.
+ //
+ // TODO(lalitm): there is a significant optimization we can do here: instead
+ // of pulling the data from SQL to C++ and then feeding that to the runtime
+ // table builder, we could just have an aggregate function which directly
+ // writes into the table itself. This would be better because:
+ // 1) It would be faster
+ // 2) It would remove the need for first creating a row dataframe and then a
+ // table builder for the initial nodes
+ // 3) It would allow code deduplication between the initial query, the step
+ // query and also CREATE PERFETTO TABLE: the code here is very similar to
+ // the code in PerfettoSqlEngine.
+
+ RuntimeTable::Builder res(pool, inits.column_names);
+ uint32_t res_row_count = 0;
+ uint32_t max_depth = DfsAndComputeMaxDepth(InitializeStateFromMaxNode());
+
+ for (uint32_t i = 0; i < max_depth + 1; ++i) {
+ tables_per_depth.emplace_back(
+ DepthTable{RuntimeTable::Builder(pool, inits.column_names), 0});
+ }
+
+ RETURN_IF_ERROR(PushDownStartingAggregates(res, res_row_count));
+ ASSIGN_OR_RETURN(auto agg_stmt, PrepareStatement());
+ RETURN_IF_ERROR(agg_stmt.status());
+
+ uint32_t agg_col_count = sqlite::column::Count(agg_stmt.sqlite_stmt());
+ std::vector<std::string> aggregate_cols;
+ aggregate_cols.reserve(agg_col_count);
+ for (uint32_t i = 0; i < agg_col_count; ++i) {
+ aggregate_cols.emplace_back(
+ sqlite::column::Name(agg_stmt.sqlite_stmt(), i));
+ }
+
+ if (aggregate_cols != inits.column_names) {
+ return base::ErrStatus(
+ "graph_scan: aggregate SQL columns do not match init columns");
+ }
+
+ for (auto i = static_cast<int64_t>(tables_per_depth.size() - 1); i >= 0;
+ --i) {
+ int err = sqlite::stmt::Reset(agg_stmt.sqlite_stmt());
+ if (err != SQLITE_OK) {
+ return base::ErrStatus("Failed to reset statement");
+ }
+ auto idx = static_cast<uint32_t>(i);
+ ASSIGN_OR_RETURN(auto depth_tab,
+ std::move(tables_per_depth[idx].builder)
+ .Build(tables_per_depth[idx].row_count));
+ err = sqlite::bind::Pointer(
+ agg_stmt.sqlite_stmt(), 1, depth_tab.release(), "TABLE", [](void* tab) {
+ std::unique_ptr<RuntimeTable>(static_cast<RuntimeTable*>(tab));
+ });
+ if (err != SQLITE_OK) {
+ return base::ErrStatus("Failed to bind pointer %d", err);
+ }
+ RETURN_IF_ERROR(
+ PushDownAggregates(agg_stmt, agg_col_count, res, res_row_count));
+ }
+ return std::move(res).Build(res_row_count);
+}
+
+struct GraphScan : public SqliteFunction<GraphScan> {
+ static constexpr char kName[] = "__intrinsic_graph_scan";
+ static constexpr int kArgCount = 4;
+ struct UserDataContext {
+ PerfettoSqlEngine* engine;
+ StringPool* pool;
+ };
+
+ static void Step(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
+ PERFETTO_DCHECK(argc == kArgCount);
+
+ auto* user_data = GetUserData(ctx);
+ const char* reduce = sqlite::value::Text(argv[2]);
+ if (!reduce) {
+ return sqlite::result::Error(ctx,
+ "graph_scan: aggegate SQL cannot be null");
+ }
+ const char* column_list = sqlite::value::Text(argv[3]);
+ if (!column_list) {
+ return sqlite::result::Error(ctx,
+ "graph_scan: column list cannot be null");
+ }
+
+ std::vector<std::string> col_names{"id"};
+ for (const auto& c :
+ base::SplitString(base::StripChars(column_list, "()", ' '), ",")) {
+ col_names.push_back(base::TrimWhitespace(c));
+ }
+
+ const auto* init = sqlite::value::Pointer<perfetto_sql::RowDataframe>(
+ argv[1], "ROW_DATAFRAME");
+ if (!init) {
+ SQLITE_ASSIGN_OR_RETURN(
+ ctx, auto table,
+ RuntimeTable::Builder(user_data->pool, std::move(col_names))
+ .Build(0));
+ return sqlite::result::UniquePointer(ctx, std::move(table), "TABLE");
+ }
+ if (col_names != init->column_names) {
+ return sqlite::result::Error(
+ ctx, "graph_scan: column list does not match initial table list");
+ }
+
+ const auto* nodes =
+ sqlite::value::Pointer<perfetto_sql::Graph>(argv[0], "GRAPH");
+ GraphScanner scanner{
+ user_data->engine,
+ user_data->pool,
+ nodes ? *nodes : perfetto_sql::Graph(),
+ *init,
+ reduce,
+ {},
+ {},
+ {},
+ };
+ auto result = scanner.Run();
+ if (!result.ok()) {
+ return sqlite::utils::SetError(ctx, result.status());
+ }
+ return sqlite::result::UniquePointer(ctx, std::move(*result), "TABLE");
+ }
+};
+
+} // namespace
+
+base::Status RegisterGraphScanFunctions(PerfettoSqlEngine& engine,
+ StringPool* pool) {
+ return engine.RegisterSqliteFunction<GraphScan>(
+ std::make_unique<GraphScan::UserDataContext>(
+ GraphScan::UserDataContext{&engine, pool}));
+}
+
+} // namespace perfetto::trace_processor
diff --git a/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h b/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h
new file mode 100644
index 0000000..10efcd9
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_FUNCTIONS_GRAPH_SCAN_H_
+#define SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_FUNCTIONS_GRAPH_SCAN_H_
+
+#include "perfetto/base/status.h"
+#include "src/trace_processor/containers/string_pool.h"
+#include "src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h"
+
+namespace perfetto::trace_processor {
+
+// Registers all graph scan related functions with |engine|.
+base::Status RegisterGraphScanFunctions(PerfettoSqlEngine& engine,
+ StringPool* pool);
+
+} // namespace perfetto::trace_processor
+
+#endif // SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_FUNCTIONS_GRAPH_SCAN_H_
diff --git a/src/trace_processor/perfetto_sql/intrinsics/operators/interval_intersect_operator.h b/src/trace_processor/perfetto_sql/intrinsics/operators/interval_intersect_operator.h
index cdc02f9..896d1a7 100644
--- a/src/trace_processor/perfetto_sql/intrinsics/operators/interval_intersect_operator.h
+++ b/src/trace_processor/perfetto_sql/intrinsics/operators/interval_intersect_operator.h
@@ -75,7 +75,9 @@
query_results.clear();
index = 0;
auto* tree_ptr = trees.Find(tree_key);
- PERFETTO_DCHECK(tree_ptr);
+ if (!tree_ptr) {
+ return;
+ }
(*tree_ptr)->FindOverlaps(start, end, query_results);
}
};
diff --git a/src/trace_processor/perfetto_sql/intrinsics/types/value.h b/src/trace_processor/perfetto_sql/intrinsics/types/value.h
index 6c541d1..026ee23 100644
--- a/src/trace_processor/perfetto_sql/intrinsics/types/value.h
+++ b/src/trace_processor/perfetto_sql/intrinsics/types/value.h
@@ -25,6 +25,21 @@
using Value = std::variant<std::monostate, int64_t, double, std::string>;
+template <typename T>
+inline constexpr uint32_t ValueIndex() {
+ if constexpr (std::is_same_v<T, std::monostate>) {
+ return 0;
+ } else if constexpr (std::is_same_v<T, int64_t>) {
+ return 1;
+ } else if constexpr (std::is_same_v<T, double>) {
+ return 2;
+ } else if constexpr (std::is_same_v<T, std::string>) {
+ return 3;
+ } else {
+ static_assert(!sizeof(T*), "T is not supported");
+ }
+}
+
} // namespace perfetto::trace_processor::perfetto_sql
#endif // SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_TYPES_VALUE_H_
diff --git a/src/trace_processor/perfetto_sql/stdlib/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
index 9441df6..c63854e 100644
--- a/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/BUILD.gn
@@ -25,11 +25,9 @@
"counters",
"deprecated/v42/common",
"export",
- "gpu",
"graphs",
"intervals",
"linux",
- "memory",
"pkvm",
"prelude",
"sched",
diff --git a/src/trace_processor/perfetto_sql/stdlib/android/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/android/BUILD.gn
index ce4fe77..b1266c0 100644
--- a/src/trace_processor/perfetto_sql/stdlib/android/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/android/BUILD.gn
@@ -18,6 +18,7 @@
deps = [
"auto",
"frames",
+ "gpu",
"memory",
"startup",
"winscope",
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/android/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/android/gpu/BUILD.gn
similarity index 87%
rename from src/trace_processor/perfetto_sql/stdlib/memory/android/BUILD.gn
rename to src/trace_processor/perfetto_sql/stdlib/android/gpu/BUILD.gn
index 55123b6..09a5e85 100644
--- a/src/trace_processor/perfetto_sql/stdlib/memory/android/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/android/gpu/BUILD.gn
@@ -14,6 +14,9 @@
import("../../../../../../gn/perfetto_sql.gni")
-perfetto_sql_source_set("android") {
- sources = [ "gpu.sql" ]
+perfetto_sql_source_set("gpu") {
+ sources = [
+ "frequency.sql",
+ "memory.sql",
+ ]
}
diff --git a/src/trace_processor/perfetto_sql/stdlib/gpu/frequency.sql b/src/trace_processor/perfetto_sql/stdlib/android/gpu/frequency.sql
similarity index 95%
rename from src/trace_processor/perfetto_sql/stdlib/gpu/frequency.sql
rename to src/trace_processor/perfetto_sql/stdlib/android/gpu/frequency.sql
index 2bd87b2..e2511eb 100644
--- a/src/trace_processor/perfetto_sql/stdlib/gpu/frequency.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/android/gpu/frequency.sql
@@ -17,7 +17,7 @@
INCLUDE PERFETTO MODULE counters.intervals;
-- GPU frequency counter per GPU.
-CREATE PERFETTO TABLE gpu_frequency(
+CREATE PERFETTO TABLE android_gpu_frequency(
-- Timestamp
ts INT,
-- Duration
@@ -39,4 +39,4 @@
ON t.id = c.track_id AND t.name = 'gpufreq'
WHERE gpu_id IS NOT NULL
))
-JOIN gpu_counter_track t ON t.id = track_id;
\ No newline at end of file
+JOIN gpu_counter_track t ON t.id = track_id;
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/android/gpu.sql b/src/trace_processor/perfetto_sql/stdlib/android/gpu/memory.sql
similarity index 77%
rename from src/trace_processor/perfetto_sql/stdlib/memory/android/gpu.sql
rename to src/trace_processor/perfetto_sql/stdlib/android/gpu/memory.sql
index 39abaec..3eb8e11 100644
--- a/src/trace_processor/perfetto_sql/stdlib/memory/android/gpu.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/android/gpu/memory.sql
@@ -13,18 +13,18 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
-INCLUDE PERFETTO MODULE memory.linux.general;
+INCLUDE PERFETTO MODULE linux.memory.general;
-- Counter for GPU memory per process with duration.
-CREATE PERFETTO TABLE memory_gpu_per_process(
- -- Timestamp
- ts INT,
- -- Duration
- dur INT,
- -- Upid of the process
- upid INT,
- -- GPU memory
- gpu_memory INT
+CREATE PERFETTO TABLE android_gpu_memory_per_process(
+ -- Timestamp
+ ts INT,
+ -- Duration
+ dur INT,
+ -- Upid of the process
+ upid INT,
+ -- GPU memory
+ gpu_memory INT
) AS
SELECT
ts,
@@ -32,4 +32,4 @@
upid,
cast_int!(value) AS gpu_memory
FROM _all_counters_per_process
-WHERE name = 'GPU Memory';
\ No newline at end of file
+WHERE name = 'GPU Memory';
diff --git a/src/trace_processor/perfetto_sql/stdlib/android/memory/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/android/memory/BUILD.gn
index c308bcd..e6555fc 100644
--- a/src/trace_processor/perfetto_sql/stdlib/android/memory/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/android/memory/BUILD.gn
@@ -16,5 +16,5 @@
perfetto_sql_source_set("memory") {
deps = [ "heap_graph" ]
- sources = []
+ sources = [ "process.sql" ]
}
diff --git a/src/trace_processor/perfetto_sql/stdlib/android/memory/process.sql b/src/trace_processor/perfetto_sql/stdlib/android/memory/process.sql
new file mode 100644
index 0000000..efa56e1
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/android/memory/process.sql
@@ -0,0 +1,99 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the 'License');
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an 'AS IS' BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+INCLUDE PERFETTO MODULE android.oom_adjuster;
+INCLUDE PERFETTO MODULE linux.memory.process;
+
+-- OOM score tables
+
+CREATE VIRTUAL TABLE _mem_ooms_sj
+USING SPAN_OUTER_JOIN(
+ android_oom_adj_intervals PARTITIONED upid,
+ _memory_rss_and_swap_per_process_table PARTITIONED upid);
+
+-- Process memory and its OOM adjuster scores. Detects transitions, each new
+-- interval means that either the memory or OOM adjuster score of the process changed.
+CREATE PERFETTO TABLE memory_oom_score_with_rss_and_swap_per_process(
+ -- Timestamp the oom_adj score or memory of the process changed
+ ts INT,
+ -- Duration until the next oom_adj score or memory change of the process.
+ dur INT,
+ -- oom adjuster score of the process.
+ score INT,
+ -- oom adjuster bucket of the process.
+ bucket STRING,
+ -- Upid of the process having an oom_adj update.
+ upid INT,
+ -- Name of the process having an oom_adj update.
+ process_name STRING,
+ -- Pid of the process having an oom_adj update.
+ pid INT,
+ -- Slice of the latest oom_adj update in the system_server. Alias of
+ -- `slice.id`.
+ oom_adj_id INT,
+ -- Timestamp of the latest oom_adj update in the system_server.
+ oom_adj_ts INT,
+ -- Duration of the latest oom_adj update in the system_server.
+ oom_adj_dur INT,
+ -- Track of the latest oom_adj update in the system_server. Alias of
+ -- `track.id`.
+ oom_adj_track_id INT,
+ -- Thread name of the latest oom_adj update in the system_server.
+ oom_adj_thread_name STRING,
+ -- Reason for the latest oom_adj update in the system_server.
+ oom_adj_reason STRING,
+ -- Trigger for the latest oom_adj update in the system_server.
+ oom_adj_trigger STRING,
+ -- Anon RSS counter value
+ anon_rss INT,
+ -- File RSS counter value
+ file_rss INT,
+ -- Shared memory RSS counter value
+ shmem_rss INT,
+ -- Total RSS value. Sum of `anon_rss`, `file_rss` and `shmem_rss`. Returns
+ -- value even if one of the values is NULL.
+ rss INT,
+ -- Swap counter value
+ swap INT,
+ -- Sum of `anon_rss` and `swap`. Returns value even if one of the values is
+ -- NULL.
+ anon_rss_and_swap INT,
+ -- Sum of `rss` and `swap`. Returns value even if one of the values is NULL.
+ rss_and_swap INT
+) AS
+SELECT
+ ts,
+ dur,
+ score,
+ bucket,
+ upid,
+ process_name,
+ pid,
+ oom_adj_id,
+ oom_adj_ts,
+ oom_adj_dur,
+ oom_adj_track_id,
+ oom_adj_thread_name,
+ oom_adj_reason,
+ oom_adj_trigger,
+ anon_rss,
+ file_rss,
+ shmem_rss,
+ file_rss + anon_rss + COALESCE(shmem_rss, 0) AS rss,
+ swap,
+ anon_rss + COALESCE(swap, 0) AS anon_rss_and_swap,
+ anon_rss + file_rss + COALESCE(shmem_rss, 0) + COALESCE(swap, 0) AS rss_and_swap
+FROM _mem_ooms_sj
+JOIN process USING (upid);
diff --git a/src/trace_processor/perfetto_sql/stdlib/gpu/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/gpu/BUILD.gn
deleted file mode 100644
index a5f431d..0000000
--- a/src/trace_processor/perfetto_sql/stdlib/gpu/BUILD.gn
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (C) 2024 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("../../../../../gn/perfetto_sql.gni")
-
-perfetto_sql_source_set("gpu") {
- sources = [ "frequency.sql" ]
-}
diff --git a/src/trace_processor/perfetto_sql/stdlib/graphs/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/graphs/BUILD.gn
index ba5bb3e..1710494 100644
--- a/src/trace_processor/perfetto_sql/stdlib/graphs/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/graphs/BUILD.gn
@@ -18,6 +18,7 @@
sources = [
"dominator_tree.sql",
"partition.sql",
+ "scan.sql",
"search.sql",
]
}
diff --git a/src/trace_processor/perfetto_sql/stdlib/graphs/scan.sql b/src/trace_processor/perfetto_sql/stdlib/graphs/scan.sql
new file mode 100644
index 0000000..7243545
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/graphs/scan.sql
@@ -0,0 +1,85 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+CREATE PERFETTO MACRO _graph_scan_df_agg(x Expr, y Expr)
+RETURNS Expr AS __intrinsic_stringify!($x), $y;
+
+CREATE PERFETTO MACRO _graph_scan_bind(x Expr, y Expr)
+RETURNS Expr AS __intrinsic_table_ptr_bind($x, __intrinsic_stringify!($y));
+
+CREATE PERFETTO MACRO _graph_scan_select(x Expr, y Expr)
+RETURNS Expr AS $x as $y;
+
+-- Performs a "scan" over the graph starting at `init_table` and using `graph_table`
+-- for edges to follow.
+--
+-- See https://en.wikipedia.org/wiki/Prefix_sum#Scan_higher_order_function for
+-- details of what a scan means.
+CREATE PERFETTO MACRO _graph_scan(
+ -- The table containing the edges of the graph. Needs to have the columns
+ -- `source_node_id` and `dest_node_id`.
+ graph_table TableOrSubquery,
+ -- The table of nodes to start the scan from. Needs to have the column `id`
+ -- and all columns specified by `agg_columns`.
+ init_table TableOrSubquery,
+ -- A parenthesised and comma-separated list of columns which will be returned
+ -- by the scan. Should match exactly both the names and order of the columns
+ -- in `init_table` and `agg_query`.
+ --
+ -- Example: (cumulative_sum, cumulative_count).
+ agg_columns ColumnNameList,
+ -- A subquery which aggregates the data for one step of the scan. Should contain
+ -- the column `id` and all columns specified by `agg_columns`. Should read from
+ -- a variable table labelled `$table`.
+ agg_query TableOrSubquery
+)
+RETURNS TableOrSubquery AS
+(
+ select
+ c0 as id,
+ __intrinsic_token_zip_join!(
+ (c1, c2, c3, c4, c5, c6, c7),
+ $agg_columns,
+ _graph_scan_select,
+ __intrinsic_token_comma!()
+ )
+ from __intrinsic_table_ptr(__intrinsic_graph_scan(
+ (
+ select __intrinsic_graph_agg(g.source_node_id, g.dest_node_id)
+ from $graph_table g
+ ),
+ (
+ select __intrinsic_row_dataframe_agg(
+ 'id', s.id,
+ __intrinsic_token_zip_join!(
+ $agg_columns,
+ $agg_columns,
+ _graph_scan_df_agg,
+ __intrinsic_token_comma!()
+ )
+ )
+ from $init_table s
+ ),
+ __intrinsic_stringify!($agg_query),
+ __intrinsic_stringify!($agg_columns)
+ ))
+ where __intrinsic_table_ptr_bind(c0, 'id')
+ and __intrinsic_token_zip_join!(
+ (c1, c2, c3, c4, c5, c6, c7),
+ $agg_columns,
+ _graph_scan_bind,
+ AND
+ )
+);
diff --git a/src/trace_processor/perfetto_sql/stdlib/linux/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/linux/BUILD.gn
index 7014a70..7c4dd47 100644
--- a/src/trace_processor/perfetto_sql/stdlib/linux/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/linux/BUILD.gn
@@ -16,5 +16,8 @@
perfetto_sql_source_set("linux") {
sources = []
- deps = [ "cpu" ]
+ deps = [
+ "cpu",
+ "memory",
+ ]
}
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/linux/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/linux/memory/BUILD.gn
similarity index 95%
rename from src/trace_processor/perfetto_sql/stdlib/memory/linux/BUILD.gn
rename to src/trace_processor/perfetto_sql/stdlib/linux/memory/BUILD.gn
index 0711197..050d387 100644
--- a/src/trace_processor/perfetto_sql/stdlib/memory/linux/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/stdlib/linux/memory/BUILD.gn
@@ -14,7 +14,7 @@
import("../../../../../../gn/perfetto_sql.gni")
-perfetto_sql_source_set("linux") {
+perfetto_sql_source_set("memory") {
sources = [
"general.sql",
"high_watermark.sql",
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/linux/general.sql b/src/trace_processor/perfetto_sql/stdlib/linux/memory/general.sql
similarity index 100%
rename from src/trace_processor/perfetto_sql/stdlib/memory/linux/general.sql
rename to src/trace_processor/perfetto_sql/stdlib/linux/memory/general.sql
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/linux/high_watermark.sql b/src/trace_processor/perfetto_sql/stdlib/linux/memory/high_watermark.sql
similarity index 97%
rename from src/trace_processor/perfetto_sql/stdlib/memory/linux/high_watermark.sql
rename to src/trace_processor/perfetto_sql/stdlib/linux/memory/high_watermark.sql
index b01f2df..b43a75c 100644
--- a/src/trace_processor/perfetto_sql/stdlib/memory/linux/high_watermark.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/linux/memory/high_watermark.sql
@@ -13,8 +13,8 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
-INCLUDE PERFETTO MODULE memory.linux.process;
INCLUDE PERFETTO MODULE counters.intervals;
+INCLUDE PERFETTO MODULE linux.memory.process;
CREATE PERFETTO TABLE _memory_rss_high_watermark_per_process_table AS
WITH with_rss AS (
@@ -64,4 +64,4 @@
name AS process_name,
rss_high_watermark
FROM _memory_rss_high_watermark_per_process_table
-JOIN process USING (upid);
\ No newline at end of file
+JOIN process USING (upid);
diff --git a/src/trace_processor/perfetto_sql/stdlib/linux/memory/process.sql b/src/trace_processor/perfetto_sql/stdlib/linux/memory/process.sql
new file mode 100644
index 0000000..771843c
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/stdlib/linux/memory/process.sql
@@ -0,0 +1,134 @@
+--
+-- Copyright 2024 The Android Open Source Project
+--
+-- Licensed under the Apache License, Version 2.0 (the 'License');
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- https://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an 'AS IS' BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+INCLUDE PERFETTO MODULE linux.memory.general;
+
+-- All memory counters tables.
+
+CREATE PERFETTO VIEW _anon_rss AS
+SELECT
+ ts,
+ dur,
+ upid,
+ value AS anon_rss_val
+FROM _all_counters_per_process
+WHERE name = 'mem.rss.anon';
+
+CREATE PERFETTO VIEW _file_rss AS
+SELECT
+ ts,
+ dur,
+ upid,
+ value AS file_rss_val
+FROM _all_counters_per_process
+WHERE name = 'mem.rss.file';
+
+CREATE PERFETTO VIEW _shmem_rss AS
+SELECT
+ ts,
+ dur,
+ upid,
+ value AS shmem_rss_val
+FROM _all_counters_per_process
+WHERE name = 'mem.rss.shmem';
+
+CREATE PERFETTO VIEW _swap AS
+SELECT
+ ts,
+ dur,
+ upid,
+ value AS swap_val
+FROM _all_counters_per_process
+WHERE name = 'mem.swap';
+
+-- Span joins
+
+CREATE VIRTUAL TABLE _anon_swap_sj
+USING SPAN_OUTER_JOIN(
+ _anon_rss PARTITIONED upid,
+ _swap PARTITIONED upid);
+
+CREATE VIRTUAL TABLE _anon_swap_file_sj
+USING SPAN_OUTER_JOIN(
+ _anon_swap_sj PARTITIONED upid,
+ _file_rss PARTITIONED upid
+);
+
+CREATE VIRTUAL TABLE _rss_swap_sj
+USING SPAN_OUTER_JOIN(
+ _anon_swap_file_sj PARTITIONED upid,
+ _shmem_rss PARTITIONED upid
+);
+
+CREATE PERFETTO TABLE _memory_rss_and_swap_per_process_table AS
+SELECT
+ ts, dur, upid,
+ cast_int!(anon_rss_val) AS anon_rss,
+ cast_int!(file_rss_val) AS file_rss,
+ cast_int!(shmem_rss_val) AS shmem_rss,
+ cast_int!(swap_val) AS swap
+FROM _rss_swap_sj;
+
+
+-- Memory metrics timeline for each process.
+CREATE PERFETTO VIEW memory_rss_and_swap_per_process(
+ -- Timestamp
+ ts INT,
+ -- Duration
+ dur INT,
+ -- Upid of the process
+ upid INT,
+ -- Pid of the process
+ pid INT,
+ -- Name of the process
+ process_name STRING,
+ -- Anon RSS counter value
+ anon_rss INT,
+ -- File RSS counter value
+ file_rss INT,
+ -- Shared memory RSS counter value
+ shmem_rss INT,
+ -- Total RSS value. Sum of `anon_rss`, `file_rss` and `shmem_rss`. Returns
+ -- value even if one of the values is NULL.
+ rss INT,
+ -- Swap counter value
+ swap INT,
+ -- Sum of `anon_rss` and `swap`. Returns value even if one of the values is
+ -- NULL.
+ anon_rss_and_swap INT,
+ -- Sum of `rss` and `swap`. Returns value even if one of the values is NULL.
+ rss_and_swap INT
+) AS
+SELECT
+ ts,
+ dur,
+ upid,
+ pid,
+ name AS process_name,
+ anon_rss,
+ file_rss,
+ shmem_rss,
+ -- We do COALESCE only on `shmem_rss` and `swap`, as it can be expected all
+ -- process start to emit anon rss and file rss events (you'll need to at
+ -- least read code and have some memory to work with) - so the NULLs are real
+ -- values. But it is possible that you will never swap or never use shmem,
+ -- so those values are expected to often be NULLs, which shouldn't propagate
+ -- into the values like `anon_rss_and_swap` or `rss`.
+ file_rss + anon_rss + COALESCE(shmem_rss, 0) AS rss,
+ swap,
+ anon_rss + COALESCE(swap, 0) AS anon_rss_and_swap,
+ anon_rss + file_rss + COALESCE(shmem_rss, 0) + COALESCE(swap, 0) AS rss_and_swap
+FROM _memory_rss_and_swap_per_process_table
+JOIN process USING (upid);
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/BUILD.gn b/src/trace_processor/perfetto_sql/stdlib/memory/BUILD.gn
deleted file mode 100644
index 44a3fe1..0000000
--- a/src/trace_processor/perfetto_sql/stdlib/memory/BUILD.gn
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2024 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("../../../../../gn/perfetto_sql.gni")
-
-perfetto_sql_source_set("memory") {
- deps = [
- "android",
- "linux",
- ]
- sources = []
-}
diff --git a/src/trace_processor/perfetto_sql/stdlib/memory/linux/process.sql b/src/trace_processor/perfetto_sql/stdlib/memory/linux/process.sql
deleted file mode 100644
index 4601c21..0000000
--- a/src/trace_processor/perfetto_sql/stdlib/memory/linux/process.sql
+++ /dev/null
@@ -1,217 +0,0 @@
---
--- Copyright 2024 The Android Open Source Project
---
--- Licensed under the Apache License, Version 2.0 (the 'License');
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
--- https://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an 'AS IS' BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-INCLUDE PERFETTO MODULE android.oom_adjuster;
-INCLUDE PERFETTO MODULE memory.linux.general;
-
--- All memory counters tables.
-
-CREATE PERFETTO VIEW _anon_rss AS
-SELECT
- ts,
- dur,
- upid,
- value AS anon_rss_val
-FROM _all_counters_per_process
-WHERE name = 'mem.rss.anon';
-
-CREATE PERFETTO VIEW _file_rss AS
-SELECT
- ts,
- dur,
- upid,
- value AS file_rss_val
-FROM _all_counters_per_process
-WHERE name = 'mem.rss.file';
-
-CREATE PERFETTO VIEW _shmem_rss AS
-SELECT
- ts,
- dur,
- upid,
- value AS shmem_rss_val
-FROM _all_counters_per_process
-WHERE name = 'mem.rss.shmem';
-
-CREATE PERFETTO VIEW _swap AS
-SELECT
- ts,
- dur,
- upid,
- value AS swap_val
-FROM _all_counters_per_process
-WHERE name = 'mem.swap';
-
--- Span joins
-
-CREATE VIRTUAL TABLE _anon_swap_sj
-USING SPAN_OUTER_JOIN(
- _anon_rss PARTITIONED upid,
- _swap PARTITIONED upid);
-
-CREATE VIRTUAL TABLE _anon_swap_file_sj
-USING SPAN_OUTER_JOIN(
- _anon_swap_sj PARTITIONED upid,
- _file_rss PARTITIONED upid
-);
-
-CREATE VIRTUAL TABLE _rss_swap_sj
-USING SPAN_OUTER_JOIN(
- _anon_swap_file_sj PARTITIONED upid,
- _shmem_rss PARTITIONED upid
-);
-
-CREATE PERFETTO TABLE _memory_rss_and_swap_per_process_table AS
-SELECT
- ts, dur, upid,
- cast_int!(anon_rss_val) AS anon_rss,
- cast_int!(file_rss_val) AS file_rss,
- cast_int!(shmem_rss_val) AS shmem_rss,
- cast_int!(swap_val) AS swap
-FROM _rss_swap_sj;
-
-
--- Memory metrics timeline for each process.
-CREATE PERFETTO VIEW memory_rss_and_swap_per_process(
- -- Timestamp
- ts INT,
- -- Duration
- dur INT,
- -- Upid of the process
- upid INT,
- -- Pid of the process
- pid INT,
- -- Name of the process
- process_name STRING,
- -- Anon RSS counter value
- anon_rss INT,
- -- File RSS counter value
- file_rss INT,
- -- Shared memory RSS counter value
- shmem_rss INT,
- -- Total RSS value. Sum of `anon_rss`, `file_rss` and `shmem_rss`. Returns
- -- value even if one of the values is NULL.
- rss INT,
- -- Swap counter value
- swap INT,
- -- Sum or `anon_rss` and `swap`. Returns value even if one of the values is
- -- NULL.
- anon_rss_and_swap INT,
- -- Sum or `rss` and `swap`. Returns value even if one of the values is NULL.
- rss_and_swap INT
-) AS
-SELECT
- ts,
- dur,
- upid,
- pid,
- name AS process_name,
- anon_rss,
- file_rss,
- shmem_rss,
- -- We do COALESCE only on `shmem_rss` and `swap`, as it can be expected all
- -- process start to emit anon rss and file rss events (you'll need to at
- -- least read code and have some memory to work with) - so the NULLs are real
- -- values. But it is possible that you will never swap or never use shmem,
- -- so those values are expected to often be NULLs, which shouldn't propagate
- -- into the values like `anon_and_swap` or `rss`.
- file_rss + anon_rss + COALESCE(shmem_rss, 0) AS rss,
- swap,
- anon_rss + COALESCE(swap, 0) AS anon_rss_and_swap,
- anon_rss + file_rss + COALESCE(shmem_rss, 0) + COALESCE(swap, 0) AS rss_and_swap
-FROM _memory_rss_and_swap_per_process_table
-JOIN process USING (upid);
-
--- OOM score tables
-
-CREATE VIRTUAL TABLE _mem_ooms_sj
-USING SPAN_OUTER_JOIN(
- android_oom_adj_intervals PARTITIONED upid,
- _memory_rss_and_swap_per_process_table PARTITIONED upid);
-
--- Process memory and it's OOM adjuster scores. Detects transitions, each new
--- interval means that either the memory or OOM adjuster score of the process changed.
-CREATE PERFETTO TABLE memory_oom_score_with_rss_and_swap_per_process(
- -- Timestamp the oom_adj score or memory of the process changed
- ts INT,
- -- Duration until the next oom_adj score or memory change of the process.
- dur INT,
- -- oom adjuster score of the process.
- score INT,
- -- oom adjuster bucket of the process.
- bucket STRING,
- -- Upid of the process having an oom_adj update.
- upid INT,
- -- Name of the process having an oom_adj update.
- process_name STRING,
- -- Pid of the process having an oom_adj update.
- pid INT,
- -- Slice of the latest oom_adj update in the system_server. Alias of
- -- `slice.id`.
- oom_adj_id INT,
- -- Timestamp of the latest oom_adj update in the system_server.
- oom_adj_ts INT,
- -- Duration of the latest oom_adj update in the system_server.
- oom_adj_dur INT,
- -- Track of the latest oom_adj update in the system_server. Alias of
- -- `track.id`.
- oom_adj_track_id INT,
- -- Thread name of the latest oom_adj update in the system_server.
- oom_adj_thread_name STRING,
- -- Reason for the latest oom_adj update in the system_server.
- oom_adj_reason STRING,
- -- Trigger for the latest oom_adj update in the system_server.
- oom_adj_trigger STRING,
- -- Anon RSS counter value
- anon_rss INT,
- -- File RSS counter value
- file_rss INT,
- -- Shared memory RSS counter value
- shmem_rss INT,
- -- Total RSS value. Sum of `anon_rss`, `file_rss` and `shmem_rss`. Returns
- -- value even if one of the values is NULL.
- rss INT,
- -- Swap counter value
- swap INT,
- -- Sum or `anon_rss` and `swap`. Returns value even if one of the values is
- -- NULL.
- anon_rss_and_swap INT,
- -- Sum or `rss` and `swap`. Returns value even if one of the values is NULL.
- rss_and_swap INT
-) AS
-SELECT
- ts,
- dur,
- score,
- bucket,
- upid,
- process_name,
- pid,
- oom_adj_id,
- oom_adj_ts,
- oom_adj_dur,
- oom_adj_track_id,
- oom_adj_thread_name,
- oom_adj_reason,
- oom_adj_trigger,
- anon_rss,
- file_rss,
- shmem_rss,
- file_rss + anon_rss + COALESCE(shmem_rss, 0) AS rss,
- swap,
- anon_rss + COALESCE(swap, 0) AS anon_rss_and_swap,
- anon_rss + file_rss + COALESCE(shmem_rss, 0) + COALESCE(swap, 0) AS rss_and_swap
-FROM _mem_ooms_sj
-JOIN process USING (upid);
diff --git a/src/trace_processor/perfetto_sql/stdlib/prelude/tables_views.sql b/src/trace_processor/perfetto_sql/stdlib/prelude/tables_views.sql
index 402ac52..747b874 100644
--- a/src/trace_processor/perfetto_sql/stdlib/prelude/tables_views.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/prelude/tables_views.sql
@@ -12,7 +12,8 @@
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--- Contains information of CPUs seen during the trace.
+
+-- Contains information about the CPUs on the device this trace was taken on.
CREATE PERFETTO VIEW cpu (
-- Unique identifier for this CPU. Identical to |ucpu|, prefer using |ucpu|
-- instead.
@@ -44,8 +45,9 @@
WHERE
cpu IS NOT NULL;
--- Contains information of available frequencies of CPUs.
-CREATE PERFETTO VIEW cpu_frequencies (
+-- Contains the frequency values that the CPUs on the device are capable of
+-- running at.
+CREATE PERFETTO VIEW cpu_available_frequencies (
-- Unique identifier for this cpu frequency.
id UINT,
-- The CPU for this frequency, meaningful only in single machine traces.
diff --git a/src/trace_processor/sqlite/bindings/BUILD.gn b/src/trace_processor/sqlite/bindings/BUILD.gn
index 9205585..0f30567 100644
--- a/src/trace_processor/sqlite/bindings/BUILD.gn
+++ b/src/trace_processor/sqlite/bindings/BUILD.gn
@@ -19,9 +19,12 @@
source_set("bindings") {
sources = [
"sqlite_aggregate_function.h",
+ "sqlite_bind.h",
+ "sqlite_column.h",
"sqlite_function.h",
"sqlite_module.h",
"sqlite_result.h",
+ "sqlite_stmt.h",
"sqlite_type.h",
"sqlite_value.h",
"sqlite_window_function.h",
diff --git a/src/trace_processor/sqlite/bindings/sqlite_bind.h b/src/trace_processor/sqlite/bindings/sqlite_bind.h
new file mode 100644
index 0000000..a2c7347
--- /dev/null
+++ b/src/trace_processor/sqlite/bindings/sqlite_bind.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_BIND_H_
+#define SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_BIND_H_
+
+#include <sqlite3.h> // IWYU pragma: export
+#include <cstdint>
+
+namespace perfetto::trace_processor::sqlite::bind {
+
+// This file contains wrappers around the SQLite functions which operate on
+// stmt bindings and start with sqlite3_bind_*.
+
+using PointerDestructor = void(void*);
+inline int Pointer(sqlite3_stmt* stmt,
+ uint32_t N,
+ void* ptr,
+ const char* name,
+ PointerDestructor destructor) {
+ return sqlite3_bind_pointer(stmt, static_cast<int>(N), ptr, name, destructor);
+}
+
+} // namespace perfetto::trace_processor::sqlite::bind
+
+#endif // SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_BIND_H_
diff --git a/src/trace_processor/sqlite/bindings/sqlite_column.h b/src/trace_processor/sqlite/bindings/sqlite_column.h
new file mode 100644
index 0000000..cf1c7c8
--- /dev/null
+++ b/src/trace_processor/sqlite/bindings/sqlite_column.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_COLUMN_H_
+#define SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_COLUMN_H_
+
+#include <sqlite3.h> // IWYU pragma: export
+#include <cstdint>
+
+#include "src/trace_processor/sqlite/bindings/sqlite_type.h"
+
+namespace perfetto::trace_processor::sqlite::column {
+
+// This file contains wrappers around the SQLite functions which operate on
+// stmt columns and start with sqlite3_column_*.
+
+inline const char* Name(sqlite3_stmt* stmt, uint32_t N) {
+ return sqlite3_column_name(stmt, static_cast<int>(N));
+}
+
+inline uint32_t Count(sqlite3_stmt* stmt) {
+ return static_cast<uint32_t>(sqlite3_column_count(stmt));
+}
+
+inline sqlite::Type Type(sqlite3_stmt* stmt, uint32_t N) {
+ return static_cast<sqlite::Type>(
+ sqlite3_column_type(stmt, static_cast<int>(N)));
+}
+
+inline int64_t Int64(sqlite3_stmt* stmt, uint32_t N) {
+ return sqlite3_column_int64(stmt, static_cast<int>(N));
+}
+
+inline const char* Text(sqlite3_stmt* stmt, uint32_t N) {
+ return reinterpret_cast<const char*>(
+ sqlite3_column_text(stmt, static_cast<int>(N)));
+}
+
+inline double Double(sqlite3_stmt* stmt, uint32_t N) {
+ return sqlite3_column_double(stmt, static_cast<int>(N));
+}
+
+inline sqlite3_value* Value(sqlite3_stmt* stmt, uint32_t N) {
+ return sqlite3_column_value(stmt, static_cast<int>(N));
+}
+
+using PointerDestructor = void(void*);
+inline int BindPointer(sqlite3_stmt* stmt,
+ uint32_t N,
+ void* ptr,
+ const char* name,
+ PointerDestructor destructor) {
+ return sqlite3_bind_pointer(stmt, static_cast<int>(N), ptr, name, destructor);
+}
+
+} // namespace perfetto::trace_processor::sqlite::column
+
+#endif // SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_COLUMN_H_
diff --git a/src/trace_processor/sqlite/bindings/sqlite_stmt.h b/src/trace_processor/sqlite/bindings/sqlite_stmt.h
new file mode 100644
index 0000000..3eec03e
--- /dev/null
+++ b/src/trace_processor/sqlite/bindings/sqlite_stmt.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_STMT_H_
+#define SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_STMT_H_
+
+#include <sqlite3.h> // IWYU pragma: export
+
+namespace perfetto::trace_processor::sqlite::stmt {
+
+// This file contains wrappers around the SQLite functions which operate on
+// sqlite3_stmt objects.
+
+inline int Reset(sqlite3_stmt* stmt) {
+ return sqlite3_reset(stmt);
+}
+
+} // namespace perfetto::trace_processor::sqlite::stmt
+
+#endif // SRC_TRACE_PROCESSOR_SQLITE_BINDINGS_SQLITE_STMT_H_
diff --git a/src/trace_processor/sqlite/sqlite_utils.h b/src/trace_processor/sqlite/sqlite_utils.h
index ef7a9bc..3f4b713 100644
--- a/src/trace_processor/sqlite/sqlite_utils.h
+++ b/src/trace_processor/sqlite/sqlite_utils.h
@@ -37,7 +37,7 @@
// Analogous to ASSIGN_OR_RETURN macro. Returns an sqlite error.
#define SQLITE_RETURN_IF_ERROR(vtab, expr) \
do { \
- base::Status status_macro_internal_status = (expr); \
+ const base::Status& status_macro_internal_status = (expr); \
if (!status_macro_internal_status.ok()) \
return sqlite::utils::SetError((vtab), status_macro_internal_status); \
} while (0)
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index f0a8ed2..ca23b0f 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -73,6 +73,7 @@
#include "src/trace_processor/perfetto_sql/intrinsics/functions/create_function.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/create_view_function.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/dominator_tree.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/functions/graph_scan.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/graph_traversal.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/import.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/layout_functions.h"
@@ -754,6 +755,12 @@
PERFETTO_FATAL("%s", status.c_message());
}
{
+ base::Status status = RegisterGraphScanFunctions(
+ *engine_, context_.storage->mutable_string_pool());
+ if (!status.ok())
+ PERFETTO_FATAL("%s", status.c_message());
+ }
+ {
base::Status status = RegisterGraphTraversalFunctions(
*engine_, *context_.storage->mutable_string_pool());
if (!status.ok())
diff --git a/test/ci/linux_tests.sh b/test/ci/linux_tests.sh
index 1a68291..58db1bf 100755
--- a/test/ci/linux_tests.sh
+++ b/test/ci/linux_tests.sh
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# TODO(ivankc) Remove this when controller rolls out
INSTALL_BUILD_DEPS_ARGS=""
source $(dirname ${BASH_SOURCE[0]})/common.sh
@@ -30,18 +31,18 @@
# that is copied into the target directory (OUT_PATH) cannot run because depends
# on libc++.so within the same folder (which is built using target bitness,
# not host bitness).
-TP_SHELL=${OUT_PATH}/gcc_like_host/trace_processor_shell
-if [ ! -f ${TP_SHELL} ]; then
- TP_SHELL=${OUT_PATH}/trace_processor_shell
+HOST_OUT_PATH=${OUT_PATH}/gcc_like_host
+if [ ! -f ${HOST_OUT_PATH}/trace_processor_shell ]; then
+ HOST_OUT_PATH=${OUT_PATH}
fi
mkdir -p /ci/artifacts/perf
tools/diff_test_trace_processor.py \
--perf-file=/ci/artifacts/perf/tp-perf-all.json \
- ${TP_SHELL}
+ ${HOST_OUT_PATH}/trace_processor_shell
-python/run_tests.py ${TP_SHELL}
+python/run_tests.py ${HOST_OUT_PATH}
# Don't run benchmarks under x86 (running out of address space because of 4GB)
# limit or debug (too slow and pointless).
diff --git a/test/trace_processor/diff_tests/include_index.py b/test/trace_processor/diff_tests/include_index.py
index 8201fad..534344f 100644
--- a/test/trace_processor/diff_tests/include_index.py
+++ b/test/trace_processor/diff_tests/include_index.py
@@ -102,21 +102,23 @@
from diff_tests.stdlib.android.frames_tests import Frames
from diff_tests.stdlib.android.startups_tests import Startups
from diff_tests.stdlib.android.tests import AndroidStdlib
+from diff_tests.stdlib.android.gpu import AndroidGpu
+from diff_tests.stdlib.android.memory import AndroidMemory
from diff_tests.stdlib.chrome.chrome_stdlib_testsuites import CHROME_STDLIB_TESTSUITES
from diff_tests.stdlib.common.tests import StdlibCommon
from diff_tests.stdlib.common.tests import StdlibCommon
from diff_tests.stdlib.counters.tests import StdlibCounterIntervals
from diff_tests.stdlib.dynamic_tables.tests import DynamicTables
from diff_tests.stdlib.export.tests import ExportTests
-from diff_tests.stdlib.gpu.tests import Gpu
from diff_tests.stdlib.graphs.dominator_tree_tests import DominatorTree
from diff_tests.stdlib.graphs.partition_tests import GraphPartitionTests
+from diff_tests.stdlib.graphs.scan_tests import GraphScanTests
from diff_tests.stdlib.graphs.search_tests import GraphSearchTests
from diff_tests.stdlib.intervals.intersect_tests import IntervalsIntersect
from diff_tests.stdlib.intervals.tests import StdlibIntervals
from diff_tests.stdlib.linux.cpu import LinuxCpu
+from diff_tests.stdlib.linux.memory import Memory
from diff_tests.stdlib.android.heap_graph_tests import HeapGraph
-from diff_tests.stdlib.memory.tests import Memory
from diff_tests.stdlib.pkvm.tests import Pkvm
from diff_tests.stdlib.prelude.math_functions_tests import PreludeMathFunctions
from diff_tests.stdlib.prelude.pprof_functions_tests import PreludePprofFunctions
@@ -268,12 +270,14 @@
chrome_stdlib_tests += test_suite.fetch()
stdlib_tests = [
+ *AndroidMemory(index_path, 'stdlib/android', 'AndroidMemory').fetch(),
+ *AndroidGpu(index_path, 'stdlib/android', 'AndroidGpu').fetch(),
*AndroidStdlib(index_path, 'stdlib/android', 'AndroidStdlib').fetch(),
*LinuxCpu(index_path, 'stdlib/linux/cpu', 'LinuxCpu').fetch(),
*DominatorTree(index_path, 'stdlib/graphs', 'DominatorTree').fetch(),
+ *GraphScanTests(index_path, 'stdlib/graphs', 'GraphScan').fetch(),
*ExportTests(index_path, 'stdlib/export', 'ExportTests').fetch(),
*Frames(index_path, 'stdlib/android', 'Frames').fetch(),
- *Gpu(index_path, 'stdlib/gpu', 'Gpu').fetch(),
*GraphSearchTests(index_path, 'stdlib/graphs',
'GraphSearchTests').fetch(),
*GraphPartitionTests(index_path, 'stdlib/graphs',
@@ -282,7 +286,7 @@
'StdlibCounterIntervals').fetch(),
*DynamicTables(index_path, 'stdlib/dynamic_tables',
'DynamicTables').fetch(),
- *Memory(index_path, 'stdlib/memory', 'Memory').fetch(),
+ *Memory(index_path, 'stdlib/linux', 'Memory').fetch(),
*PreludeMathFunctions(index_path, 'stdlib/prelude',
'PreludeMathFunctions').fetch(),
*HeapGraph(index_path, 'stdlib/android',
diff --git a/test/trace_processor/diff_tests/metrics/memory/tests.py b/test/trace_processor/diff_tests/metrics/memory/tests.py
index a5a5cec..ad774d4 100644
--- a/test/trace_processor/diff_tests/metrics/memory/tests.py
+++ b/test/trace_processor/diff_tests/metrics/memory/tests.py
@@ -371,3 +371,53 @@
"name","ts","dur","name"
"mem.dma_buffer",100,100,"1 kB"
"""))
+
+ def test_android_dma_heap_inode(self):
+ return DiffTestBlueprint(
+ trace=TextProto(r"""
+ packet {
+ ftrace_events {
+ cpu: 0
+ event {
+ timestamp: 100
+ pid: 1
+ dma_heap_stat {
+ inode: 123
+ len: 1024
+ total_allocated: 2048
+ }
+ }
+ }
+ }
+ packet {
+ ftrace_events {
+ cpu: 0
+ event {
+ timestamp: 200
+ pid: 1
+ dma_heap_stat {
+ inode: 123
+ len: -1024
+ total_allocated: 1024
+ }
+ }
+ }
+ }
+ """),
+ query="""
+ SELECT
+ tt.name,
+ tt.utid,
+ c.ts,
+ CAST(c.value AS INT) AS value,
+ args.int_value AS inode
+ FROM thread_counter_track tt
+ JOIN counter c ON c.track_id = tt.id
+ JOIN args USING (arg_set_id)
+ WHERE tt.name = 'mem.dma_heap_change' AND args.key = 'inode';
+ """,
+ out=Csv("""
+ "name","utid","ts","value","inode"
+ "mem.dma_heap_change",1,100,1024,123
+ "mem.dma_heap_change",1,200,-1024,123
+ """))
diff --git a/test/trace_processor/diff_tests/parser/parsing/tests.py b/test/trace_processor/diff_tests/parser/parsing/tests.py
index 988a430..c5a4b46 100644
--- a/test/trace_processor/diff_tests/parser/parsing/tests.py
+++ b/test/trace_processor/diff_tests/parser/parsing/tests.py
@@ -759,7 +759,7 @@
SELECT
freq,
GROUP_CONCAT(cpu) AS cpus
- FROM cpu_frequencies
+ FROM cpu_available_frequencies
GROUP BY freq
ORDER BY freq;
""",
@@ -1395,7 +1395,7 @@
SELECT
freq,
GROUP_CONCAT(cpu.cpu) AS cpus
- FROM cpu_frequencies
+ FROM cpu_available_frequencies
JOIN cpu using (ucpu)
WHERE machine_id is not NULL
GROUP BY freq
diff --git a/test/trace_processor/diff_tests/stdlib/gpu/tests.py b/test/trace_processor/diff_tests/stdlib/android/gpu.py
similarity index 68%
rename from test/trace_processor/diff_tests/stdlib/gpu/tests.py
rename to test/trace_processor/diff_tests/stdlib/android/gpu.py
index 2c23e80..8ae8ae5 100644
--- a/test/trace_processor/diff_tests/stdlib/gpu/tests.py
+++ b/test/trace_processor/diff_tests/stdlib/android/gpu.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (C) 2023 The Android Open Source Project
+# Copyright (C) 2024 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,15 +20,33 @@
from python.generators.diff_tests.testing import PrintProfileProto
-class Gpu(TestSuite):
+class AndroidGpu(TestSuite):
+
+ def test_memory_gpu_per_process(self):
+ return DiffTestBlueprint(
+ trace=Path('../../metrics/graphics/gpu_metric.py'),
+ query="""
+ INCLUDE PERFETTO MODULE android.gpu.memory;
+ SELECT *
+ FROM android_gpu_memory_per_process;
+ """,
+ out=Csv("""
+ "ts","dur","upid","gpu_memory"
+ 2,2,2,6
+ 4,6,2,8
+ 4,5,1,2
+ 9,1,1,8
+ 6,1,3,7
+ 7,3,3,10
+ """))
def test_gpu_frequency(self):
return DiffTestBlueprint(
trace=Path('../../metrics/graphics/gpu_frequency_metric.textproto'),
query="""
- INCLUDE PERFETTO MODULE gpu.frequency;
+ INCLUDE PERFETTO MODULE android.gpu.frequency;
SELECT *
- FROM gpu_frequency;
+ FROM android_gpu_frequency;
""",
out=Csv("""
"ts","dur","gpu_id","gpu_freq"
diff --git a/test/trace_processor/diff_tests/stdlib/android/memory.py b/test/trace_processor/diff_tests/stdlib/android/memory.py
new file mode 100644
index 0000000..1da5baf
--- /dev/null
+++ b/test/trace_processor/diff_tests/stdlib/android/memory.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from python.generators.diff_tests.testing import Path, DataPath, Metric, Systrace
+from python.generators.diff_tests.testing import Csv, Json, TextProto, BinaryProto
+from python.generators.diff_tests.testing import DiffTestBlueprint
+from python.generators.diff_tests.testing import TestSuite
+from python.generators.diff_tests.testing import PrintProfileProto
+
+
+class AndroidMemory(TestSuite):
+
+ def test_memory_oom_score_with_rss_and_swap_per_process(self):
+ return DiffTestBlueprint(
+ trace=DataPath('sched_wakeup_trace.atr'),
+ query="""
+ INCLUDE PERFETTO MODULE android.memory.process;
+ SELECT *
+ FROM memory_oom_score_with_rss_and_swap_per_process
+ WHERE oom_adj_reason IS NOT NULL
+ ORDER BY ts
+ LIMIT 10;
+ """,
+ out=Csv("""
+ "ts","dur","score","bucket","upid","process_name","pid","oom_adj_id","oom_adj_ts","oom_adj_dur","oom_adj_track_id","oom_adj_thread_name","oom_adj_reason","oom_adj_trigger","anon_rss","file_rss","shmem_rss","rss","swap","anon_rss_and_swap","rss_and_swap"
+ 1737065264829,701108081,925,"cached",269,"com.android.providers.calendar",1937,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49229824,57495552,835584,107560960,0,49229824,107560960
+ 1737066678827,2934486383,935,"cached",287,"com.android.imsserviceentitlement",2397,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48881664,57081856,831488,106795008,0,48881664,106795008
+ 1737066873002,2934292208,945,"cached",292,"com.android.carrierconfig",2593,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48586752,49872896,823296,99282944,0,48586752,99282944
+ 1737067058812,2934106398,955,"cached",288,"com.android.messaging",2416,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",54956032,71417856,843776,127217664,0,54956032,127217664
+ 1737067246975,699224817,955,"cached",267,"android.process.acore",1866,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",52498432,72048640,856064,125403136,0,52498432,125403136
+ 1737068421919,2932743291,965,"cached",273,"com.android.shell",2079,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48738304,52056064,823296,101617664,0,48738304,101617664
+ 1737068599673,970398,965,"cached",271,"android.process.media",2003,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49917952,60444672,839680,111202304,0,49917952,111202304
+ 1737068933602,2932231608,975,"cached",286,"com.android.gallery3d",2371,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49561600,54521856,831488,104914944,0,49561600,104914944
+ 1737069091010,682459310,975,"cached",289,"com.android.packageinstaller",2480,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49364992,52539392,827392,102731776,0,49364992,102731776
+ 1737069240534,489635,985,"cached",268,"com.android.managedprovisioning",1868,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",50683904,53985280,815104,105484288,0,50683904,105484288
+ """))
diff --git a/test/trace_processor/diff_tests/stdlib/graphs/scan_tests.py b/test/trace_processor/diff_tests/stdlib/graphs/scan_tests.py
new file mode 100644
index 0000000..f88dd2a
--- /dev/null
+++ b/test/trace_processor/diff_tests/stdlib/graphs/scan_tests.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from python.generators.diff_tests.testing import DataPath
+from python.generators.diff_tests.testing import Csv
+from python.generators.diff_tests.testing import DiffTestBlueprint
+from python.generators.diff_tests.testing import TestSuite
+
+
+class GraphScanTests(TestSuite):
+
+ def test_scan_empty(self):
+ return DiffTestBlueprint(
+ trace=DataPath('counters.json'),
+ query="""
+ INCLUDE PERFETTO MODULE graphs.scan;
+
+ WITH foo AS (
+ SELECT 0 as source_node_id, 0 AS dest_node_id
+ WHERE FALSE
+ )
+ SELECT * FROM _graph_scan!(
+ foo,
+ (SELECT 0 AS id, 0 as depth WHERE FALSE),
+ (depth),
+ (
+ select id, depth + 1 as depth
+ from $table
+ )
+ )
+ """,
+ out=Csv("""
+ "id","depth"
+ """))
+
+ def test_scan_single_row(self):
+ return DiffTestBlueprint(
+ trace=DataPath('counters.json'),
+ query="""
+ INCLUDE PERFETTO MODULE graphs.scan;
+
+ WITH foo AS (
+ SELECT 0 as source_node_id, 0 AS dest_node_id
+ WHERE FALSE
+ )
+ SELECT * FROM _graph_scan!(
+ foo,
+ (SELECT 0 AS id, 0 as depth),
+ (depth),
+ (
+ select id, depth + 1 as depth
+ from $table
+ )
+ )
+ """,
+ out=Csv("""
+ "id","depth"
+ 0,0
+ """))
+
+ def test_scan_max_recursive(self):
+ return DiffTestBlueprint(
+ trace=DataPath('counters.json'),
+ query="""
+ INCLUDE PERFETTO MODULE graphs.scan;
+
+ WITH
+ edges(source_node_id, dest_node_id) AS (
+ VALUES(0, 1), (0, 2), (1, 2), (2, 3), (4, 5)
+ ),
+ init(id, max_depth) AS (
+ VALUES(0, 0), (4, 0)
+ )
+ SELECT * FROM _graph_scan!(
+ edges,
+ init,
+ (max_depth),
+ (
+ SELECT id, MAX(max_depth) + 1 as max_depth
+ FROM $table
+ GROUP BY id
+ )
+ )
+ ORDER BY id
+ """,
+ out=Csv("""
+ "id","max_depth"
+ 0,0
+ 1,1
+ 2,2
+ 3,3
+ 4,0
+ 5,1
+ """))
+
+ def test_scan_min_recursive(self):
+ return DiffTestBlueprint(
+ trace=DataPath('counters.json'),
+ query="""
+ INCLUDE PERFETTO MODULE graphs.scan;
+
+ WITH
+ edges(source_node_id, dest_node_id) AS (
+ VALUES(0, 1), (0, 2), (1, 2), (2, 3), (4, 5)
+ ),
+ init(id, min_depth) AS (
+ VALUES(0, 0), (4, 0)
+ )
+ SELECT * FROM _graph_scan!(
+ edges,
+ init,
+ (min_depth),
+ (
+ SELECT id, MIN(min_depth) + 1 as min_depth
+ FROM $table
+ GROUP BY id
+ )
+ )
+ ORDER BY id
+ """,
+ out=Csv("""
+ "id","min_depth"
+ 0,0
+ 1,1
+ 2,1
+ 3,2
+ 4,0
+ 5,1
+ """))
diff --git a/test/trace_processor/diff_tests/stdlib/graphs/search_tests.py b/test/trace_processor/diff_tests/stdlib/graphs/search_tests.py
index 4ff28b4..e1eee98 100644
--- a/test/trace_processor/diff_tests/stdlib/graphs/search_tests.py
+++ b/test/trace_processor/diff_tests/stdlib/graphs/search_tests.py
@@ -194,7 +194,7 @@
11,10
"""))
- def test_dfs_lengauer_tarjan_example(self):
+ def test_bfs_lengauer_tarjan_example(self):
return DiffTestBlueprint(
trace=DataPath('counters.json'),
query="""
diff --git a/test/trace_processor/diff_tests/stdlib/linux/memory.py b/test/trace_processor/diff_tests/stdlib/linux/memory.py
new file mode 100644
index 0000000..be5d8ab
--- /dev/null
+++ b/test/trace_processor/diff_tests/stdlib/linux/memory.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from python.generators.diff_tests.testing import Path, DataPath, Metric, Systrace
+from python.generators.diff_tests.testing import Csv, Json, TextProto, BinaryProto
+from python.generators.diff_tests.testing import DiffTestBlueprint
+from python.generators.diff_tests.testing import TestSuite
+from python.generators.diff_tests.testing import PrintProfileProto
+
+
+class Memory(TestSuite):
+
+ def test_memory_rss_and_swap_per_process(self):
+ return DiffTestBlueprint(
+ trace=DataPath('android_postboot_unlock.pftrace'),
+ query="""
+ INCLUDE PERFETTO MODULE linux.memory.process;
+
+ SELECT *
+ FROM memory_rss_and_swap_per_process
+ WHERE upid = 1
+ LIMIT 5
+ """,
+ out=Csv("""
+ "ts","dur","upid","pid","process_name","anon_rss","file_rss","shmem_rss","rss","swap","anon_rss_and_swap","rss_and_swap"
+ 37592474220,12993896,1,1982,"com.android.systemui",125865984,"[NULL]","[NULL]","[NULL]","[NULL]",125865984,"[NULL]"
+ 37605468116,1628,1,1982,"com.android.systemui",126050304,"[NULL]","[NULL]","[NULL]","[NULL]",126050304,"[NULL]"
+ 37605469744,1302,1,1982,"com.android.systemui",126050304,"[NULL]",2990080,"[NULL]","[NULL]",126050304,"[NULL]"
+ 37605471046,685791,1,1982,"com.android.systemui",126046208,"[NULL]",2990080,"[NULL]","[NULL]",126046208,"[NULL]"
+ 37606156837,6510,1,1982,"com.android.systemui",126042112,"[NULL]",2990080,"[NULL]","[NULL]",126042112,"[NULL]"
+ """))
+
+ def test_memory_rss_high_watermark_per_process(self):
+ return DiffTestBlueprint(
+ trace=DataPath('android_postboot_unlock.pftrace'),
+ query="""
+ INCLUDE PERFETTO MODULE linux.memory.high_watermark;
+
+ SELECT *
+ FROM memory_rss_high_watermark_per_process
+ WHERE upid = 1
+ LIMIT 10;
+ """,
+ out=Csv("""
+ "ts","dur","upid","pid","process_name","rss_high_watermark"
+ 37592474220,12993896,1,1982,"com.android.systemui",125865984
+ 37605468116,1628,1,1982,"com.android.systemui",126050304
+ 37605469744,333774129,1,1982,"com.android.systemui",129040384
+ 37939243873,120479574,1,1982,"com.android.systemui",372977664
+ 38059723447,936,1,1982,"com.android.systemui",373043200
+ 38059724383,6749186,1,1982,"com.android.systemui",373174272
+ 38066473569,7869426,1,1982,"com.android.systemui",373309440
+ 38074342995,11596761,1,1982,"com.android.systemui",373444608
+ 38085939756,4877848,1,1982,"com.android.systemui",373579776
+ 38090817604,11930827,1,1982,"com.android.systemui",373714944
+ """))
diff --git a/test/trace_processor/diff_tests/stdlib/memory/tests.py b/test/trace_processor/diff_tests/stdlib/memory/tests.py
deleted file mode 100644
index d781b54..0000000
--- a/test/trace_processor/diff_tests/stdlib/memory/tests.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2024 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License a
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from python.generators.diff_tests.testing import Path, DataPath, Metric, Systrace
-from python.generators.diff_tests.testing import Csv, Json, TextProto, BinaryProto
-from python.generators.diff_tests.testing import DiffTestBlueprint
-from python.generators.diff_tests.testing import TestSuite
-from python.generators.diff_tests.testing import PrintProfileProto
-
-
-class Memory(TestSuite):
-
- def test_memory_rss_and_swap_per_process(self):
- return DiffTestBlueprint(
- trace=DataPath('android_postboot_unlock.pftrace'),
- query="""
- INCLUDE PERFETTO MODULE memory.linux.process;
-
- SELECT *
- FROM memory_rss_and_swap_per_process
- WHERE upid = 1
- LIMIT 5
- """,
- out=Csv("""
- "ts","dur","upid","pid","process_name","anon_rss","file_rss","shmem_rss","rss","swap","anon_rss_and_swap","rss_and_swap"
- 37592474220,12993896,1,1982,"com.android.systemui",125865984,"[NULL]","[NULL]","[NULL]","[NULL]",125865984,"[NULL]"
- 37605468116,1628,1,1982,"com.android.systemui",126050304,"[NULL]","[NULL]","[NULL]","[NULL]",126050304,"[NULL]"
- 37605469744,1302,1,1982,"com.android.systemui",126050304,"[NULL]",2990080,"[NULL]","[NULL]",126050304,"[NULL]"
- 37605471046,685791,1,1982,"com.android.systemui",126046208,"[NULL]",2990080,"[NULL]","[NULL]",126046208,"[NULL]"
- 37606156837,6510,1,1982,"com.android.systemui",126042112,"[NULL]",2990080,"[NULL]","[NULL]",126042112,"[NULL]"
- """))
-
- def test_memory_rss_high_watermark_per_process(self):
- return DiffTestBlueprint(
- trace=DataPath('android_postboot_unlock.pftrace'),
- query="""
- INCLUDE PERFETTO MODULE memory.linux.high_watermark;
-
- SELECT *
- FROM memory_rss_high_watermark_per_process
- WHERE upid = 1
- LIMIT 10;
- """,
- out=Csv("""
- "ts","dur","upid","pid","process_name","rss_high_watermark"
- 37592474220,12993896,1,1982,"com.android.systemui",125865984
- 37605468116,1628,1,1982,"com.android.systemui",126050304
- 37605469744,333774129,1,1982,"com.android.systemui",129040384
- 37939243873,120479574,1,1982,"com.android.systemui",372977664
- 38059723447,936,1,1982,"com.android.systemui",373043200
- 38059724383,6749186,1,1982,"com.android.systemui",373174272
- 38066473569,7869426,1,1982,"com.android.systemui",373309440
- 38074342995,11596761,1,1982,"com.android.systemui",373444608
- 38085939756,4877848,1,1982,"com.android.systemui",373579776
- 38090817604,11930827,1,1982,"com.android.systemui",373714944
- """))
-
- def test_memory_oom_score_with_rss_and_swap_per_process(self):
- return DiffTestBlueprint(
- trace=DataPath('sched_wakeup_trace.atr'),
- query="""
- INCLUDE PERFETTO MODULE memory.linux.process;
- SELECT *
- FROM memory_oom_score_with_rss_and_swap_per_process
- WHERE oom_adj_reason IS NOT NULL
- ORDER BY ts
- LIMIT 10;
- """,
- out=Csv("""
- "ts","dur","score","bucket","upid","process_name","pid","oom_adj_id","oom_adj_ts","oom_adj_dur","oom_adj_track_id","oom_adj_thread_name","oom_adj_reason","oom_adj_trigger","anon_rss","file_rss","shmem_rss","rss","swap","anon_rss_and_swap","rss_and_swap"
- 1737065264829,701108081,925,"cached",269,"com.android.providers.calendar",1937,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49229824,57495552,835584,107560960,0,49229824,107560960
- 1737066678827,2934486383,935,"cached",287,"com.android.imsserviceentitlement",2397,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48881664,57081856,831488,106795008,0,48881664,106795008
- 1737066873002,2934292208,945,"cached",292,"com.android.carrierconfig",2593,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48586752,49872896,823296,99282944,0,48586752,99282944
- 1737067058812,2934106398,955,"cached",288,"com.android.messaging",2416,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",54956032,71417856,843776,127217664,0,54956032,127217664
- 1737067246975,699224817,955,"cached",267,"android.process.acore",1866,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",52498432,72048640,856064,125403136,0,52498432,125403136
- 1737068421919,2932743291,965,"cached",273,"com.android.shell",2079,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",48738304,52056064,823296,101617664,0,48738304,101617664
- 1737068599673,970398,965,"cached",271,"android.process.media",2003,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49917952,60444672,839680,111202304,0,49917952,111202304
- 1737068933602,2932231608,975,"cached",286,"com.android.gallery3d",2371,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49561600,54521856,831488,104914944,0,49561600,104914944
- 1737069091010,682459310,975,"cached",289,"com.android.packageinstaller",2480,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",49364992,52539392,827392,102731776,0,49364992,102731776
- 1737069240534,489635,985,"cached",268,"com.android.managedprovisioning",1868,332,1737064421516,29484835,1217,"binder:642_1","processEnd","IActivityManager#1598246212",50683904,53985280,815104,105484288,0,50683904,105484288
- """))
-
- def test_memory_gpu_per_process(self):
- return DiffTestBlueprint(
- trace=Path('../../metrics/graphics/gpu_metric.py'),
- query="""
- INCLUDE PERFETTO MODULE memory.android.gpu;
- SELECT *
- FROM memory_gpu_per_process;
- """,
- out=Csv("""
- "ts","dur","upid","gpu_memory"
- 2,2,2,6
- 4,6,2,8
- 4,5,1,2
- 9,1,1,8
- 6,1,3,7
- 7,3,3,10
- """))
diff --git a/test/trace_processor/diff_tests/tables/tests.py b/test/trace_processor/diff_tests/tables/tests.py
index aa14035..7502625 100644
--- a/test/trace_processor/diff_tests/tables/tests.py
+++ b/test/trace_processor/diff_tests/tables/tests.py
@@ -299,6 +299,32 @@
70,7
"""))
+ def test_ii_wrong_partition(self):
+ return DiffTestBlueprint(
+ trace=TextProto(''),
+ query="""
+ CREATE PERFETTO TABLE A
+ AS
+ WITH x(id, ts, ts_end, c0) AS (VALUES(1, 1, 2, 1), (2, 3, 4, 2))
+ SELECT * FROM x;
+
+ CREATE PERFETTO TABLE B
+ AS
+ WITH x(id, ts, ts_end, c0) AS (VALUES(1, 5, 6, 3))
+ SELECT * FROM x;
+
+ SELECT
+ a.id AS a_id,
+ b.id AS b_id
+ FROM __intrinsic_ii_with_interval_tree('A', 'c0') a
+ JOIN __intrinsic_ii_with_interval_tree('B', 'c0') b
+ USING (c0)
+ WHERE a.ts < b.ts_end AND a.ts_end > b.ts;
+ """,
+ out=Csv("""
+ "a_id","b_id"
+ """))
+
# Null printing
def test_nulls(self):
return DiffTestBlueprint(
diff --git a/ui/release/channels.json b/ui/release/channels.json
index 157c8fa..d7473ce 100644
--- a/ui/release/channels.json
+++ b/ui/release/channels.json
@@ -2,11 +2,11 @@
"channels": [
{
"name": "stable",
- "rev": "c0c52a371ad978baf02fa7094140c6e95402bf22"
+ "rev": "f256f01aeb18f7d3ac87d9028cf59436178cf395"
},
{
"name": "canary",
- "rev": "43cf00fbe761628c5552d85fd92f3490fbf301bc"
+ "rev": "2296d092fb68cfc4fd3de4735a8e6470857da631"
},
{
"name": "autopush",
diff --git a/ui/src/common/gcs_uploader.ts b/ui/src/common/gcs_uploader.ts
new file mode 100644
index 0000000..46e514a
--- /dev/null
+++ b/ui/src/common/gcs_uploader.ts
@@ -0,0 +1,207 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {defer} from '../base/deferred';
+import {Time} from '../base/time';
+import {TraceFileStream} from '../core/trace_stream';
+
+export const BUCKET_NAME = 'perfetto-ui-data';
+export const MIME_JSON = 'application/json; charset=utf-8';
+export const MIME_BINARY = 'application/octet-stream';
+
+export interface GcsUploaderArgs {
+ /**
+ * The mime-type to use for the upload. If undefined uses
+ * application/octet-stream.
+ */
+ mimeType?: string;
+
+ /**
+ * The name to use for the uploaded file. By default it uses a hash of
+ * the passed data/blob and uses content-addressing.
+ */
+ fileName?: string;
+
+ /** An optional callback that is invoked upon upload progress (or failure) */
+ onProgress?: (uploader: GcsUploader) => void;
+}
+
+/**
+ * A utility class to handle uploads of possibly large files to
+ * Google Cloud Storage.
+ * It returns immediately if the file already exists.
+ */
+export class GcsUploader {
+ state: 'UPLOADING' | 'UPLOADED' | 'ERROR' = 'UPLOADING';
+ error = '';
+ totalSize = 0;
+ uploadedSize = 0;
+ uploadedUrl = '';
+ uploadedFileName = '';
+
+ private args: GcsUploaderArgs;
+ private onProgress: (_: GcsUploader) => void;
+ private req: XMLHttpRequest;
+ private donePromise = defer<void>();
+ private startTime = performance.now();
+
+ constructor(data: Blob | ArrayBuffer | string, args: GcsUploaderArgs) {
+ this.args = args;
+ this.onProgress = args.onProgress ?? ((_: GcsUploader) => {});
+ this.req = new XMLHttpRequest();
+ this.start(data);
+ }
+
+ async start(data: Blob | ArrayBuffer | string) {
+ let fname = this.args.fileName;
+ if (fname === undefined) {
+ // If the file name is unspecified, hash the contents.
+ if (data instanceof Blob) {
+ fname = await hashFileStreaming(data);
+ } else {
+ fname = await sha1(data);
+ }
+ }
+ this.uploadedFileName = fname;
+ this.uploadedUrl = `https://storage.googleapis.com/${BUCKET_NAME}/${fname}`;
+
+ // Check if the file has been uploaded already. If so, skip.
+ const res = await fetch(
+ `https://www.googleapis.com/storage/v1/b/${BUCKET_NAME}/o/${fname}`,
+ );
+ if (res.status === 200) {
+ console.log(
+ `Skipping upload of ${this.uploadedUrl} because it exists already`,
+ );
+ this.state = 'UPLOADED';
+ this.donePromise.resolve();
+ return;
+ }
+
+ const reqUrl =
+ 'https://www.googleapis.com/upload/storage/v1/b/' +
+ `${BUCKET_NAME}/o?uploadType=media` +
+ `&name=${fname}&predefinedAcl=publicRead`;
+ this.req.onabort = (e: ProgressEvent) => this.onRpcEvent(e);
+ this.req.onerror = (e: ProgressEvent) => this.onRpcEvent(e);
+ this.req.upload.onprogress = (e: ProgressEvent) => this.onRpcEvent(e);
+ this.req.onloadend = (e: ProgressEvent) => this.onRpcEvent(e);
+ this.req.open('POST', reqUrl, /* async= */ true);
+ const mimeType = this.args.mimeType ?? MIME_BINARY;
+ this.req.setRequestHeader('Content-Type', mimeType);
+ this.req.send(data);
+ }
+
+ waitForCompletion(): Promise<void> {
+ return this.donePromise;
+ }
+
+ abort() {
+ if (this.state === 'UPLOADING') {
+ this.req.abort();
+ }
+ }
+
+ getEtaString() {
+ let str = `${Math.ceil((100 * this.uploadedSize) / this.totalSize)}%`;
+ str += ` (${(this.uploadedSize / 1e6).toFixed(2)} MB)`;
+ const elapsed = (performance.now() - this.startTime) / 1000;
+ const rate = this.uploadedSize / elapsed;
+ const etaSecs = Math.round((this.totalSize - this.uploadedSize) / rate);
+ str += ' - ETA: ' + Time.toTimecode(Time.fromSeconds(etaSecs)).dhhmmss;
+ return str;
+ }
+
+ private onRpcEvent(e: ProgressEvent) {
+ let done = false;
+ switch (e.type) {
+ case 'progress':
+ this.uploadedSize = e.loaded;
+ this.totalSize = e.total;
+ break;
+ case 'abort':
+ this.state = 'ERROR';
+ this.error = 'Upload aborted';
+ break;
+ case 'error':
+ this.state = 'ERROR';
+ this.error = `${this.req.status} - ${this.req.statusText}`;
+ break;
+ case 'loadend':
+ done = true;
+ if (this.req.status === 200) {
+ this.state = 'UPLOADED';
+ } else if (this.state === 'UPLOADING') {
+ this.state = 'ERROR';
+ this.error = `${this.req.status} - ${this.req.statusText}`;
+ }
+ break;
+ default:
+ return;
+ }
+ this.onProgress(this);
+ if (done) {
+ this.donePromise.resolve();
+ }
+ }
+}
+
+/**
+ * Computes the SHA-1 of a string or ArrayBuffer(View)
+ * @param data: a string or ArrayBuffer to hash
+ */
+async function sha1(data: string | ArrayBuffer): Promise<string> {
+ let buffer: ArrayBuffer;
+ if (typeof data === 'string') {
+ buffer = new TextEncoder().encode(data);
+ } else {
+ buffer = data;
+ }
+ const digest = await crypto.subtle.digest('SHA-1', buffer);
+ return digestToHex(digest);
+}
+
+/**
+ * Computes a hash for the given file in streaming mode, without loading the
+ * whole file into memory. The result is "a" SHA-1 but is not the same as
+ * `shasum -a 1 file`. The reason for this is that the crypto APIs support
+ * only one-shot digest computation and lack the usual update() + digest()
+ * chunked API. So we end up computing a SHA-1 of the concatenation of the
+ * SHA-1 of each chunk.
+ * Speed: ~800 MB/s on a M2 Macbook Air 2023.
+ * @param file The file to hash.
+ * @return A hex-encoded string containing the hash of the file.
+ */
+async function hashFileStreaming(file: Blob): Promise<string> {
+ const fileStream = new TraceFileStream(file);
+ let chunkDigests = '';
+ for (;;) {
+ const chunk = await fileStream.readChunk();
+ const digest = await crypto.subtle.digest('SHA-1', chunk.data);
+ chunkDigests += digestToHex(digest);
+ if (chunk.eof) break;
+ }
+ return sha1(chunkDigests);
+}
+
+/**
+ * Converts the return value of crypto.digest() to a hex string.
+ * @param digest an array of bytes containing the digest
+ * @returns hex-encoded string of the digest.
+ */
+function digestToHex(digest: ArrayBuffer): string {
+ return Array.from(new Uint8Array(digest))
+ .map((x) => x.toString(16).padStart(2, '0'))
+ .join('');
+}
diff --git a/ui/src/common/flamegraph_unittest.ts b/ui/src/common/legacy_flamegraph_unittest.ts
similarity index 99%
rename from ui/src/common/flamegraph_unittest.ts
rename to ui/src/common/legacy_flamegraph_unittest.ts
index e0b99bf..222b2e2 100644
--- a/ui/src/common/flamegraph_unittest.ts
+++ b/ui/src/common/legacy_flamegraph_unittest.ts
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {CallsiteInfo, mergeCallsites} from './flamegraph_util';
+import {CallsiteInfo, mergeCallsites} from './legacy_flamegraph_util';
test('zeroCallsitesMerged', () => {
const callsites: CallsiteInfo[] = [
diff --git a/ui/src/common/flamegraph_util.ts b/ui/src/common/legacy_flamegraph_util.ts
similarity index 100%
rename from ui/src/common/flamegraph_util.ts
rename to ui/src/common/legacy_flamegraph_util.ts
diff --git a/ui/src/common/state_unittest.ts b/ui/src/common/state_unittest.ts
index ff111de..3dc65e2 100644
--- a/ui/src/common/state_unittest.ts
+++ b/ui/src/common/state_unittest.ts
@@ -16,7 +16,6 @@
import {createEmptyState} from './empty_state';
import {getContainingGroupKey, State} from './state';
-import {deserializeStateObject, serializeStateObject} from './upload_utils';
test('createEmptyState', () => {
const state: State = createEmptyState();
@@ -44,28 +43,3 @@
expect(getContainingGroupKey(state, 'a')).toEqual(null);
expect(getContainingGroupKey(state, 'b')).toEqual('containsB');
});
-
-test('state is serializable', () => {
- const state = createEmptyState();
- const json = serializeStateObject(state);
- const restored = deserializeStateObject<State>(json);
-
- // Remove non-serialized fields from the original state object, so it may be
- // compared fairly with the restored version.
- // This is a legitimate use of 'any'. We are comparing this object against
- // one that's taken a round trip through JSON, which has therefore lost any
- // type information. Attempting to ask TS for help here would serve no
- // purpose.
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const serializableState: any = state;
- serializableState.nonSerializableState = undefined;
-
- // Remove any undefined values from original as JSON doesn't serialize them
- for (const key in serializableState) {
- if (serializableState[key] === undefined) {
- delete serializableState[key];
- }
- }
-
- expect(serializableState).toEqual(restored);
-});
diff --git a/ui/src/common/upload_utils.ts b/ui/src/common/upload_utils.ts
deleted file mode 100644
index 627a337..0000000
--- a/ui/src/common/upload_utils.ts
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright (C) 2020 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import {isString} from '../base/object_utils';
-import {RecordConfig} from '../controller/record_config_types';
-
-export const BUCKET_NAME = 'perfetto-ui-data';
-import {v4 as uuidv4} from 'uuid';
-import {State} from './state';
-import {defer} from '../base/deferred';
-import {Time} from '../base/time';
-
-export class TraceGcsUploader {
- state: 'UPLOADING' | 'UPLOADED' | 'ERROR' = 'UPLOADING';
- error = '';
- totalSize = 0;
- uploadedSize = 0;
- uploadedUrl = '';
- onProgress: () => void;
- private req: XMLHttpRequest;
- private reqUrl: string;
- private donePromise = defer<void>();
- private startTime = performance.now();
-
- constructor(trace: File | ArrayBuffer, onProgress?: () => void) {
- // TODO(hjd): This should probably also be a hash but that requires
- // trace processor support.
- const name = uuidv4();
- this.uploadedUrl = `https://storage.googleapis.com/${BUCKET_NAME}/${name}`;
- this.reqUrl =
- 'https://www.googleapis.com/upload/storage/v1/b/' +
- `${BUCKET_NAME}/o?uploadType=media` +
- `&name=${name}&predefinedAcl=publicRead`;
- this.onProgress = onProgress || (() => {});
- this.req = new XMLHttpRequest();
- this.req.onabort = (e: ProgressEvent) => this.onRpcEvent(e);
- this.req.onerror = (e: ProgressEvent) => this.onRpcEvent(e);
- this.req.upload.onprogress = (e: ProgressEvent) => this.onRpcEvent(e);
- this.req.onloadend = (e: ProgressEvent) => this.onRpcEvent(e);
- this.req.open('POST', this.reqUrl);
- this.req.setRequestHeader('Content-Type', 'application/octet-stream');
- this.req.send(trace);
- }
-
- waitForCompletion(): Promise<void> {
- return this.donePromise;
- }
-
- abort() {
- if (this.state === 'UPLOADING') {
- this.req.abort();
- }
- }
-
- getEtaString() {
- let str = `${Math.ceil((100 * this.uploadedSize) / this.totalSize)}%`;
- str += ` (${(this.uploadedSize / 1e6).toFixed(2)} MB)`;
- const elapsed = (performance.now() - this.startTime) / 1000;
- const rate = this.uploadedSize / elapsed;
- const etaSecs = Math.round((this.totalSize - this.uploadedSize) / rate);
- str += ' - ETA: ' + Time.toTimecode(Time.fromSeconds(etaSecs)).dhhmmss;
- return str;
- }
-
- private onRpcEvent(e: ProgressEvent) {
- let done = false;
- switch (e.type) {
- case 'progress':
- this.uploadedSize = e.loaded;
- this.totalSize = e.total;
- break;
- case 'abort':
- this.state = 'ERROR';
- this.error = 'Upload aborted';
- break;
- case 'error':
- this.state = 'ERROR';
- this.error = `${this.req.status} - ${this.req.statusText}`;
- break;
- case 'loadend':
- done = true;
- if (this.req.status === 200) {
- this.state = 'UPLOADED';
- } else if (this.state === 'UPLOADING') {
- this.state = 'ERROR';
- this.error = `${this.req.status} - ${this.req.statusText}`;
- }
- break;
- default:
- return;
- }
- this.onProgress();
- if (done) {
- this.donePromise.resolve();
- }
- }
-}
-
-// Bigint's are not serializable using JSON.stringify, so we use a special
-// object when serialising
-export type SerializedBigint = {
- __kind: 'bigint';
- value: string;
-};
-
-// Check if a value looks like a serialized bigint
-export function isSerializedBigint(value: unknown): value is SerializedBigint {
- if (value === null) {
- return false;
- }
- if (typeof value !== 'object') {
- return false;
- }
- if ('__kind' in value && 'value' in value) {
- return value.__kind === 'bigint' && isString(value.value);
- }
- return false;
-}
-
-export function serializeStateObject(object: unknown): string {
- const json = JSON.stringify(object, (key, value) => {
- if (typeof value === 'bigint') {
- return {
- __kind: 'bigint',
- value: value.toString(),
- };
- }
- return key === 'nonSerializableState' ? undefined : value;
- });
- return json;
-}
-
-export function deserializeStateObject<T>(json: string): T {
- const object = JSON.parse(json, (_key, value) => {
- if (isSerializedBigint(value)) {
- return BigInt(value.value);
- }
- return value;
- });
- return object as T;
-}
-
-export async function saveState(
- stateOrConfig: State | RecordConfig,
-): Promise<string> {
- const text = serializeStateObject(stateOrConfig);
- const hash = await toSha256(text);
- const url =
- 'https://www.googleapis.com/upload/storage/v1/b/' +
- `${BUCKET_NAME}/o?uploadType=media` +
- `&name=${hash}&predefinedAcl=publicRead`;
- const response = await fetch(url, {
- method: 'post',
- headers: {
- 'Content-Type': 'application/json; charset=utf-8',
- },
- body: text,
- });
- await response.json();
- return hash;
-}
-
-// This has a bug:
-// x.toString(16) doesn't zero pad so if the digest is:
-// [23, 7, 42, ...]
-// You get:
-// ['17', '7', '2a', ...] = 1772a...
-// Rather than:
-// ['17', '07', '2a', ...] = 17072a...
-// As you ought to (and as the hexdigest is computed by e.g. Python).
-// Unfortunately there are a lot of old permalinks out there so we
-// still need this broken implementation to check their hashes.
-export async function buggyToSha256(str: string): Promise<string> {
- const buffer = new TextEncoder().encode(str);
- const digest = await crypto.subtle.digest('SHA-256', buffer);
- return Array.from(new Uint8Array(digest))
- .map((x) => x.toString(16))
- .join('');
-}
-
-export async function toSha256(str: string): Promise<string> {
- const buffer = new TextEncoder().encode(str);
- const digest = await crypto.subtle.digest('SHA-256', buffer);
- return Array.from(new Uint8Array(digest))
- .map((x) => x.toString(16).padStart(2, '0'))
- .join('');
-}
diff --git a/ui/src/common/upload_utils_unittest.ts b/ui/src/common/upload_utils_unittest.ts
deleted file mode 100644
index 04f6cd7..0000000
--- a/ui/src/common/upload_utils_unittest.ts
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2023 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import {
- deserializeStateObject,
- isSerializedBigint,
- serializeStateObject,
-} from './upload_utils';
-
-describe('isSerializedBigint', () => {
- it('should return true for a valid serialized bigint', () => {
- const value = {
- __kind: 'bigint',
- value: '1234567890',
- };
- expect(isSerializedBigint(value)).toBeTruthy();
- });
-
- it('should return false for a null value', () => {
- expect(isSerializedBigint(null)).toBeFalsy();
- });
-
- it('should return false for a non-object value', () => {
- expect(isSerializedBigint(123)).toBeFalsy();
- });
-
- it('should return false for a non-serialized bigint value', () => {
- const value = {
- __kind: 'not-bigint',
- value: '1234567890',
- };
- expect(isSerializedBigint(value)).toBeFalsy();
- });
-});
-
-describe('serializeStateObject', () => {
- it('should serialize a simple object', () => {
- const object = {
- a: 1,
- b: 2,
- c: 3,
- };
- const expectedJson = `{"a":1,"b":2,"c":3}`;
- expect(serializeStateObject(object)).toEqual(expectedJson);
- });
-
- it('should serialize a bigint', () => {
- const object = {
- a: 123456789123456789n,
- };
- const expectedJson = `{"a":{"__kind":"bigint","value":"123456789123456789"}}`;
- expect(serializeStateObject(object)).toEqual(expectedJson);
- });
-
- it('should not serialize a non-serializable property', () => {
- const object = {
- a: 1,
- b: 2,
- c: 3,
- nonSerializableState: 4,
- };
- const expectedJson = `{"a":1,"b":2,"c":3}`;
- expect(serializeStateObject(object)).toEqual(expectedJson);
- });
-});
-
-describe('deserializeStateObject', () => {
- it('should deserialize a simple object', () => {
- const json = `{"a":1,"b":2,"c":3}`;
- const expectedObject = {
- a: 1,
- b: 2,
- c: 3,
- };
- expect(deserializeStateObject(json)).toEqual(expectedObject);
- });
-
- it('should deserialize a bigint', () => {
- const json = `{"a":{"__kind":"bigint","value":"123456789123456789"}}`;
- const expectedObject = {
- a: 123456789123456789n,
- };
- expect(deserializeStateObject(json)).toEqual(expectedObject);
- });
-
- it('should deserialize a null', () => {
- const json = `{"a":null}`;
- const expectedObject = {
- a: null,
- };
- expect(deserializeStateObject(json)).toEqual(expectedObject);
- });
-});
diff --git a/ui/src/controller/cpu_profile_controller.ts b/ui/src/controller/cpu_profile_controller.ts
index 38d8cbf..d658170 100644
--- a/ui/src/controller/cpu_profile_controller.ts
+++ b/ui/src/controller/cpu_profile_controller.ts
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {CallsiteInfo} from '../common/flamegraph_util';
+import {CallsiteInfo} from '../common/legacy_flamegraph_util';
import {CpuProfileSampleSelection, getLegacySelection} from '../common/state';
import {CpuProfileDetails, globals} from '../frontend/globals';
import {publishCpuProfileDetails} from '../frontend/publish';
diff --git a/ui/src/controller/trace_controller.ts b/ui/src/controller/trace_controller.ts
index dea50d7..4dc1934 100644
--- a/ui/src/controller/trace_controller.ts
+++ b/ui/src/controller/trace_controller.ts
@@ -95,7 +95,7 @@
TraceStream,
} from '../core/trace_stream';
import {decideTracks} from './track_decider';
-import {profileType} from '../frontend/flamegraph_panel';
+import {profileType} from '../frontend/legacy_flamegraph_panel';
import {FlamegraphCache} from '../core/flamegraph_cache';
type States = 'init' | 'loading_trace' | 'ready';
diff --git a/ui/src/core/default_plugins.ts b/ui/src/core/default_plugins.ts
index 56a6b19..235bbb3 100644
--- a/ui/src/core/default_plugins.ts
+++ b/ui/src/core/default_plugins.ts
@@ -32,6 +32,7 @@
'dev.perfetto.BookmarkletApi',
'dev.perfetto.CoreCommands',
'dev.perfetto.LargeScreensPerf',
+ 'dev.perfetto.PinAndroidPerfMetrics',
'dev.perfetto.PinSysUITracks',
'dev.perfetto.RestorePinnedTrack',
'dev.perfetto.TimelineSync',
diff --git a/ui/src/core_plugins/counter/index.ts b/ui/src/core_plugins/counter/index.ts
index 2af0836..108b7ff 100644
--- a/ui/src/core_plugins/counter/index.ts
+++ b/ui/src/core_plugins/counter/index.ts
@@ -78,9 +78,13 @@
options.yRangeSharingKey = 'power';
}
- if (name.startsWith('mem.')) {
- options.yRangeSharingKey = 'mem';
- }
+ // TODO(stevegolton): We need to rethink how this works for virtual memory.
+ // The problem is we can easily have > 10GB virtual memory which dwarfs
+ // physical memory making other memory tracks difficult to read.
+
+ // if (name.startsWith('mem.')) {
+ // options.yRangeSharingKey = 'mem';
+ // }
if (name.startsWith('battery_stats.')) {
options.yRangeSharingKey = 'battery_stats';
diff --git a/ui/src/core_plugins/heap_profile/heap_profile_track.ts b/ui/src/core_plugins/heap_profile/heap_profile_track.ts
index 264a216..d5743e2 100644
--- a/ui/src/core_plugins/heap_profile/heap_profile_track.ts
+++ b/ui/src/core_plugins/heap_profile/heap_profile_track.ts
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {profileType} from '../..//frontend/flamegraph_panel';
+import {profileType} from '../../frontend/legacy_flamegraph_panel';
import {Actions} from '../../common/actions';
import {ProfileType, LegacySelection} from '../../common/state';
import {
diff --git a/ui/src/core_plugins/heap_profile/index.ts b/ui/src/core_plugins/heap_profile/index.ts
index 8e0b5ba..24b09e3 100644
--- a/ui/src/core_plugins/heap_profile/index.ts
+++ b/ui/src/core_plugins/heap_profile/index.ts
@@ -14,9 +14,9 @@
import {FlamegraphCache} from '../../core/flamegraph_cache';
import {
- FlamegraphDetailsPanel,
+ LegacyFlamegraphDetailsPanel,
profileType,
-} from '../../frontend/flamegraph_panel';
+} from '../../frontend/legacy_flamegraph_panel';
import {Plugin, PluginContextTrace, PluginDescriptor} from '../../public';
import {NUM} from '../../trace_processor/query_result';
import {HeapProfileTrack} from './heap_profile_track';
@@ -53,7 +53,7 @@
ctx.registerDetailsPanel({
render: (sel) => {
if (sel.kind === 'HEAP_PROFILE') {
- return m(FlamegraphDetailsPanel, {
+ return m(LegacyFlamegraphDetailsPanel, {
cache,
selection: {
profileType: profileType(sel.type),
diff --git a/ui/src/core_plugins/perf_samples_profile/index.ts b/ui/src/core_plugins/perf_samples_profile/index.ts
index 8517739..098244e 100644
--- a/ui/src/core_plugins/perf_samples_profile/index.ts
+++ b/ui/src/core_plugins/perf_samples_profile/index.ts
@@ -16,9 +16,9 @@
import {PERF_SAMPLES_PROFILE_TRACK_KIND} from '../../public';
import {FlamegraphCache} from '../../core/flamegraph_cache';
import {
- FlamegraphDetailsPanel,
+ LegacyFlamegraphDetailsPanel,
profileType,
-} from '../../frontend/flamegraph_panel';
+} from '../../frontend/legacy_flamegraph_panel';
import {Plugin, PluginContextTrace, PluginDescriptor} from '../../public';
import {NUM} from '../../trace_processor/query_result';
import {PerfSamplesProfileTrack} from './perf_samples_profile_track';
@@ -50,7 +50,7 @@
ctx.registerDetailsPanel({
render: (sel) => {
if (sel.kind === 'PERF_SAMPLES') {
- return m(FlamegraphDetailsPanel, {
+ return m(LegacyFlamegraphDetailsPanel, {
cache,
selection: {
profileType: profileType(sel.type),
diff --git a/ui/src/core_plugins/perf_samples_profile/perf_samples_profile_track.ts b/ui/src/core_plugins/perf_samples_profile/perf_samples_profile_track.ts
index a9b3ca9..21d7876 100644
--- a/ui/src/core_plugins/perf_samples_profile/perf_samples_profile_track.ts
+++ b/ui/src/core_plugins/perf_samples_profile/perf_samples_profile_track.ts
@@ -18,7 +18,7 @@
import {ProfileType, getLegacySelection} from '../../common/state';
import {TrackData} from '../../common/track_data';
import {TimelineFetcher} from '../../common/track_helper';
-import {FLAMEGRAPH_HOVERED_COLOR} from '../../frontend/flamegraph';
+import {FLAMEGRAPH_HOVERED_COLOR} from '../../frontend/legacy_flamegraph';
import {globals} from '../../frontend/globals';
import {PanelSize} from '../../frontend/panel';
import {TimeScale} from '../../frontend/time_scale';
diff --git a/ui/src/frontend/aggregation_tab.ts b/ui/src/frontend/aggregation_tab.ts
index 0c68f5f..f8889bb 100644
--- a/ui/src/frontend/aggregation_tab.ts
+++ b/ui/src/frontend/aggregation_tab.ts
@@ -24,9 +24,9 @@
import {FlowEventsAreaSelectedPanel} from './flow_events_panel';
import {PivotTable} from './pivot_table';
import {
- FlamegraphDetailsPanel,
+ LegacyFlamegraphDetailsPanel,
FlamegraphSelectionParams,
-} from './flamegraph_panel';
+} from './legacy_flamegraph_panel';
import {ProfileType, TrackState} from '../common/state';
import {assertExists} from '../base/logging';
import {Monitor} from '../base/monitor';
@@ -69,7 +69,7 @@
views.push({
key: 'flamegraph_selection',
name: 'Flamegraph Selection',
- content: m(FlamegraphDetailsPanel, {
+ content: m(LegacyFlamegraphDetailsPanel, {
cache: globals.areaFlamegraphCache,
selection: this.flamegraphSelection,
}),
diff --git a/ui/src/frontend/cpu_profile_panel.ts b/ui/src/frontend/cpu_profile_panel.ts
index 0d46381..0987d50 100644
--- a/ui/src/frontend/cpu_profile_panel.ts
+++ b/ui/src/frontend/cpu_profile_panel.ts
@@ -15,7 +15,7 @@
import m from 'mithril';
import {globals} from './globals';
-import {CallsiteInfo} from '../common/flamegraph_util';
+import {CallsiteInfo} from '../common/legacy_flamegraph_util';
interface CpuProfileDetailsPanelAttrs {}
diff --git a/ui/src/frontend/error_dialog.ts b/ui/src/frontend/error_dialog.ts
index 835292e..11e55d2 100644
--- a/ui/src/frontend/error_dialog.ts
+++ b/ui/src/frontend/error_dialog.ts
@@ -16,7 +16,7 @@
import {ErrorDetails} from '../base/logging';
import {EXTENSION_URL} from '../common/recordingV2/recording_utils';
-import {TraceGcsUploader} from '../common/upload_utils';
+import {GcsUploader} from '../common/gcs_uploader';
import {RECORDING_V2_FLAG} from '../core/feature_flags';
import {raf} from '../core/raf_scheduler';
import {VERSION} from '../gen/perfetto_version';
@@ -107,7 +107,7 @@
private uploadStatus = '';
private userDescription = '';
private errorMessage = '';
- private uploader?: TraceGcsUploader;
+ private uploader?: GcsUploader;
constructor() {
this.traceState = 'NOT_AVAILABLE';
@@ -232,16 +232,18 @@
) {
this.traceState = 'UPLOADING';
this.uploadStatus = '';
- const uploader = new TraceGcsUploader(this.traceData, () => {
- raf.scheduleFullRedraw();
- this.uploadStatus = uploader.getEtaString();
- if (uploader.state === 'UPLOADED') {
- this.traceState = 'UPLOADED';
- this.traceUrl = uploader.uploadedUrl;
- } else if (uploader.state === 'ERROR') {
- this.traceState = 'NOT_UPLOADED';
- this.uploadStatus = uploader.error;
- }
+ const uploader = new GcsUploader(this.traceData, {
+ onProgress: () => {
+ raf.scheduleFullRedraw();
+ this.uploadStatus = uploader.getEtaString();
+ if (uploader.state === 'UPLOADED') {
+ this.traceState = 'UPLOADED';
+ this.traceUrl = uploader.uploadedUrl;
+ } else if (uploader.state === 'ERROR') {
+ this.traceState = 'NOT_UPLOADED';
+ this.uploadStatus = uploader.error;
+ }
+ },
});
this.uploader = uploader;
} else if (!checked && this.uploader) {
diff --git a/ui/src/frontend/globals.ts b/ui/src/frontend/globals.ts
index d2a1c57..7fab2d5 100644
--- a/ui/src/frontend/globals.ts
+++ b/ui/src/frontend/globals.ts
@@ -55,7 +55,7 @@
import {SelectionManager, LegacySelection} from '../core/selection_manager';
import {Optional, exists} from '../base/utils';
import {OmniboxManager} from './omnibox_manager';
-import {CallsiteInfo} from '../common/flamegraph_util';
+import {CallsiteInfo} from '../common/legacy_flamegraph_util';
import {FlamegraphCache} from '../core/flamegraph_cache';
const INSTANT_FOCUS_DURATION = 1n;
diff --git a/ui/src/frontend/flamegraph.ts b/ui/src/frontend/legacy_flamegraph.ts
similarity index 99%
rename from ui/src/frontend/flamegraph.ts
rename to ui/src/frontend/legacy_flamegraph.ts
index 6c90916..19db899 100644
--- a/ui/src/frontend/flamegraph.ts
+++ b/ui/src/frontend/legacy_flamegraph.ts
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {CallsiteInfo} from '../common/flamegraph_util';
+import {CallsiteInfo} from '../common/legacy_flamegraph_util';
import {searchSegment} from '../base/binary_search';
import {cropText} from '../common/canvas_utils';
diff --git a/ui/src/frontend/flamegraph_panel.ts b/ui/src/frontend/legacy_flamegraph_panel.ts
similarity index 98%
rename from ui/src/frontend/flamegraph_panel.ts
rename to ui/src/frontend/legacy_flamegraph_panel.ts
index c80713c..a7440d8 100644
--- a/ui/src/frontend/flamegraph_panel.ts
+++ b/ui/src/frontend/legacy_flamegraph_panel.ts
@@ -26,7 +26,7 @@
findRootSize,
mergeCallsites,
viewingOptions,
-} from '../common/flamegraph_util';
+} from '../common/legacy_flamegraph_util';
import {ProfileType} from '../common/state';
import {raf} from '../core/raf_scheduler';
import {Button} from '../widgets/button';
@@ -36,7 +36,7 @@
import {EmptyState} from '../widgets/empty_state';
import {Spinner} from '../widgets/spinner';
-import {Flamegraph, NodeRendering} from './flamegraph';
+import {Flamegraph, NodeRendering} from './legacy_flamegraph';
import {globals} from './globals';
import {debounce} from './rate_limiters';
import {Router} from './router';
@@ -147,7 +147,7 @@
}>;
}
-export class FlamegraphDetailsPanel
+export class LegacyFlamegraphDetailsPanel
implements m.ClassComponent<FlamegraphDetailsPanelAttrs>
{
private undebouncedFocusRegex = '';
@@ -204,7 +204,7 @@
this.state.result = undefined;
const state = this.state;
this.queryLimiter.schedule(() => {
- return FlamegraphDetailsPanel.fetchQueryResults(
+ return LegacyFlamegraphDetailsPanel.fetchQueryResults(
assertExists(this.getCurrentEngine()),
attrs.cache,
state,
@@ -227,7 +227,7 @@
);
this.state.result.renderResults = mergeCallsites(
expanded,
- FlamegraphDetailsPanel.getMinSizeDisplayed(
+ LegacyFlamegraphDetailsPanel.getMinSizeDisplayed(
expanded,
selected?.totalSize,
),
@@ -461,13 +461,13 @@
cache: FlamegraphCache,
state: FlamegraphState,
) {
- const table = await FlamegraphDetailsPanel.prepareViewsAndTables(
+ const table = await LegacyFlamegraphDetailsPanel.prepareViewsAndTables(
engine,
cache,
state,
);
const queryResults =
- await FlamegraphDetailsPanel.getFlamegraphDataFromTables(
+ await LegacyFlamegraphDetailsPanel.getFlamegraphDataFromTables(
engine,
table,
state.viewingOption,
diff --git a/ui/src/frontend/flamegraph_unittest.ts b/ui/src/frontend/legacy_flamegraph_unittest.ts
similarity index 96%
rename from ui/src/frontend/flamegraph_unittest.ts
rename to ui/src/frontend/legacy_flamegraph_unittest.ts
index 0cc463c..ddb006a 100644
--- a/ui/src/frontend/flamegraph_unittest.ts
+++ b/ui/src/frontend/legacy_flamegraph_unittest.ts
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {splitIfTooBig} from './flamegraph';
+import {splitIfTooBig} from './legacy_flamegraph';
test('textGoingToMultipleLines', () => {
const text = 'Dummy text to go to multiple lines.';
diff --git a/ui/src/frontend/permalink.ts b/ui/src/frontend/permalink.ts
index 888fe55..d101b8d 100644
--- a/ui/src/frontend/permalink.ts
+++ b/ui/src/frontend/permalink.ts
@@ -22,14 +22,7 @@
createEmptyState,
} from '../common/empty_state';
import {EngineConfig, ObjectById, STATE_VERSION, State} from '../common/state';
-import {
- BUCKET_NAME,
- TraceGcsUploader,
- buggyToSha256,
- deserializeStateObject,
- saveState,
- toSha256,
-} from '../common/upload_utils';
+import {BUCKET_NAME, GcsUploader, MIME_JSON} from '../common/gcs_uploader';
import {
RecordConfig,
recordConfigValidator,
@@ -41,6 +34,7 @@
} from './publish';
import {Router} from './router';
import {showModal} from '../widgets/modal';
+import {isString} from '../base/object_utils';
export interface PermalinkOptions {
isRecordingConfig?: boolean;
@@ -89,23 +83,25 @@
if (dataToUpload !== undefined) {
updateStatus(`Uploading ${traceName}`);
- const uploader = new TraceGcsUploader(dataToUpload, () => {
- switch (uploader.state) {
- case 'UPLOADING':
- const statusTxt = `Uploading ${uploader.getEtaString()}`;
- updateStatus(statusTxt);
- break;
- case 'UPLOADED':
- // Convert state to use URLs and remove permalink.
- const url = uploader.uploadedUrl;
- uploadState = produce(globals.state, (draft) => {
- assertExists(draft.engine).source = {type: 'URL', url};
- });
- break;
- case 'ERROR':
- updateStatus(`Upload failed ${uploader.error}`);
- break;
- } // switch (state)
+ const uploader = new GcsUploader(dataToUpload, {
+ onProgress: () => {
+ switch (uploader.state) {
+ case 'UPLOADING':
+ const statusTxt = `Uploading ${uploader.getEtaString()}`;
+ updateStatus(statusTxt);
+ break;
+ case 'UPLOADED':
+ // Convert state to use URLs and remove permalink.
+ const url = uploader.uploadedUrl;
+ uploadState = produce(globals.state, (draft) => {
+ assertExists(draft.engine).source = {type: 'URL', url};
+ });
+ break;
+ case 'ERROR':
+ updateStatus(`Upload failed ${uploader.error}`);
+ break;
+ } // switch (state)
+ },
}); // onProgress
await uploader.waitForCompletion();
}
@@ -118,6 +114,13 @@
return hash;
}
+async function saveState(stateOrConfig: State | RecordConfig): Promise<string> {
+ const stateJson = serializeStateObject(stateOrConfig);
+ const uploader = new GcsUploader(stateJson, {mimeType: MIME_JSON});
+ await uploader.waitForCompletion();
+ return uploader.uploadedFileName;
+}
+
function updateStatus(msg: string): void {
// TODO(hjd): Unify loading updates.
globals.dispatch(
@@ -157,24 +160,57 @@
);
}
const text = await response.text();
- const stateHash = await toSha256(text);
const state = deserializeStateObject<State>(text);
- if (stateHash !== id) {
- // Old permalinks incorrectly dropped some digits from the
- // hexdigest of the SHA256. We don't want to invalidate those
- // links so we also compute the old string and try that here
- // also.
- const buggyStateHash = await buggyToSha256(text);
- if (buggyStateHash !== id) {
- throw new Error(`State hash does not match ${id} vs. ${stateHash}`);
- }
- }
if (!isRecordConfig(state)) {
return upgradeState(state);
}
return state;
}
+function deserializeStateObject<T>(json: string): T {
+ const object = JSON.parse(json, (_key, value) => {
+ if (isSerializedBigint(value)) {
+ return BigInt(value.value);
+ }
+ return value;
+ });
+ return object as T;
+}
+
+export function serializeStateObject(object: unknown): string {
+ const json = JSON.stringify(object, (key, value) => {
+ if (typeof value === 'bigint') {
+ return {
+ __kind: 'bigint',
+ value: value.toString(),
+ };
+ }
+ return key === 'nonSerializableState' ? undefined : value;
+ });
+ return json;
+}
+
+// BigInts are not serializable using JSON.stringify, so we use a special
+// object when serialising
+type SerializedBigint = {
+ __kind: 'bigint';
+ value: string;
+};
+
+// Check if a value looks like a serialized bigint
+function isSerializedBigint(value: unknown): value is SerializedBigint {
+ if (value === null) {
+ return false;
+ }
+ if (typeof value !== 'object') {
+ return false;
+ }
+ if ('__kind' in value && 'value' in value) {
+ return value.__kind === 'bigint' && isString(value.value);
+ }
+ return false;
+}
+
function isRecordConfig(
stateOrConfig: State | RecordConfig,
): stateOrConfig is RecordConfig {
diff --git a/ui/src/frontend/sql_table/tab.ts b/ui/src/frontend/sql_table/tab.ts
index 0affa5f..4e42eb2 100644
--- a/ui/src/frontend/sql_table/tab.ts
+++ b/ui/src/frontend/sql_table/tab.ts
@@ -36,6 +36,7 @@
table: SqlTableDescription;
displayName?: string;
filters?: Filter[];
+ imports?: string[];
}
export function addSqlTableTab(config: SqlTableTabConfig): void {
@@ -67,6 +68,7 @@
this.engine,
this.config.table,
this.config.filters,
+ this.config.imports,
);
}
diff --git a/ui/src/frontend/thread_slice_details_tab.ts b/ui/src/frontend/thread_slice_details_tab.ts
index 666b43f..56f6405 100644
--- a/ui/src/frontend/thread_slice_details_tab.ts
+++ b/ui/src/frontend/thread_slice_details_tab.ts
@@ -28,7 +28,9 @@
import {Tree, TreeNode} from '../widgets/tree';
import {BottomTab, NewBottomTabArgs} from './bottom_tab';
+import {addDebugSliceTrack} from './debug_tracks/debug_tracks';
import {FlowPoint, globals} from './globals';
+import {addQueryResultsTab} from './query_result_tab';
import {hasArgs, renderArguments} from './slice_args';
import {renderDetails} from './slice_details';
import {getSlice, SliceDetails, SliceRef} from './sql/slice';
@@ -36,10 +38,10 @@
BreakdownByThreadState,
breakDownIntervalByThreadState,
} from './sql/thread_state';
+import {addSqlTableTab} from './sql_table/tab';
+import {SqlTables} from './sql_table/well_known_tables';
import {asSliceSqlId} from './sql_types';
import {DurationWidget} from './widgets/duration';
-import {addDebugSliceTrack} from './debug_tracks/debug_tracks';
-import {addQueryResultsTab} from './query_result_tab';
interface ContextMenuItem {
name: string;
@@ -85,6 +87,30 @@
const ITEMS: ContextMenuItem[] = [
{
+ name: 'Ancestor slices',
+ shouldDisplay: (slice: SliceDetails) => slice.parentId !== undefined,
+ run: (slice: SliceDetails) =>
+ addSqlTableTab({
+ table: SqlTables.slice,
+ filters: [
+ `id IN (SELECT id FROM _slice_ancestor_and_self(${slice.id}))`,
+ ],
+ imports: ['slices.hierarchy'],
+ }),
+ },
+ {
+ name: 'Descendant slices',
+ shouldDisplay: () => true,
+ run: (slice: SliceDetails) =>
+ addSqlTableTab({
+ table: SqlTables.slice,
+ filters: [
+ `id IN (SELECT id FROM _slice_descendant_and_self(${slice.id}))`,
+ ],
+ imports: ['slices.hierarchy'],
+ }),
+ },
+ {
name: 'Average duration of slice name',
shouldDisplay: (slice: SliceDetails) => hasName(slice),
run: (slice: SliceDetails) =>
@@ -102,7 +128,9 @@
hasPid(slice),
run: (slice: SliceDetails) => {
const engine = getEngine();
- if (engine === undefined) return;
+ if (engine === undefined) {
+ return;
+ }
engine
.query(
`
diff --git a/ui/src/plugins/com.google.PixelMemory/index.ts b/ui/src/plugins/com.google.PixelMemory/index.ts
index 0412e19..80ac5af 100644
--- a/ui/src/plugins/com.google.PixelMemory/index.ts
+++ b/ui/src/plugins/com.google.PixelMemory/index.ts
@@ -27,15 +27,17 @@
if (pid === null) return;
}
const RSS_ALL = `
- INCLUDE PERFETTO MODULE memory.linux.process;
- INCLUDE PERFETTO MODULE memory.android.gpu;
+ INCLUDE PERFETTO MODULE android.gpu.memory;
+ INCLUDE PERFETTO MODULE linux.memory.process;
DROP TABLE IF EXISTS process_mem_rss_anon_file_shmem_swap_gpu;
CREATE VIRTUAL TABLE process_mem_rss_anon_file_shmem_swap_gpu
USING
SPAN_OUTER_JOIN(
- memory_gpu_per_process PARTITIONED upid, memory_rss_and_swap_per_process PARTITIONED upid);
+ android_gpu_memory_per_process PARTITIONED upid,
+ memory_rss_and_swap_per_process PARTITIONED upid
+ );
`;
await ctx.engine.query(RSS_ALL);
await addDebugCounterTrack(
diff --git a/ui/src/plugins/dev.perfetto.AndroidLongBatteryTracing/index.ts b/ui/src/plugins/dev.perfetto.AndroidLongBatteryTracing/index.ts
index 2c3af80..736c93f 100644
--- a/ui/src/plugins/dev.perfetto.AndroidLongBatteryTracing/index.ts
+++ b/ui/src/plugins/dev.perfetto.AndroidLongBatteryTracing/index.ts
@@ -33,6 +33,35 @@
dur: number;
}
+const PACKAGE_LOOKUP = `
+ create or replace perfetto table package_name_lookup as
+ with installed as (
+ select uid, string_agg(package_name, ',') as name
+ from package_list
+ where uid >= 10000
+ group by 1
+ ),
+ system(uid, name) as (
+ values
+ (0, 'AID_ROOT'),
+ (1000, 'AID_SYSTEM_USER'),
+ (1001, 'AID_RADIO'),
+ (1082, 'AID_ARTD')
+ )
+ select uid, name from installed
+ union all
+ select uid, name from system
+ order by uid;
+
+ -- Adds a "package_name" column by joining on "uid" from the source table.
+ create or replace perfetto macro add_package_name(src TableOrSubquery) returns TableOrSubquery as (
+ select A.*, ifnull(B.name, "uid=" || A.uid) as package_name
+ from $src as A
+ left join package_name_lookup as B
+ on (B.uid = (A.uid % 100000))
+ );
+`;
+
const DEFAULT_NETWORK = `
with base as (
select
@@ -555,28 +584,13 @@
(lead(time_millis) over (partition by uid, cluster order by ts) - time_millis) * 1000000.0 as cpu_dur
from base
),
- app_package_list as (
- select
- uid,
- group_concat(package_name) as package_name
- from package_list
- where uid >= 10000
- group by 1
- ),
with_ratio as (
select
ts,
iif(dur is null, 0, max(0, 100.0 * cpu_dur / dur)) as value,
case cluster when 0 then 'little' when 1 then 'mid' when 2 then 'big' else 'cl-' || cluster end as cluster,
- case
- when uid = 0 then 'AID_ROOT'
- when uid = 1000 then 'AID_SYSTEM_USER'
- when uid = 1001 then 'AID_RADIO'
- when uid = 1082 then 'AID_ARTD'
- when pl.package_name is null then 'uid=' || uid
- else pl.package_name
- end as pkg
- from with_windows left join app_package_list pl using(uid)
+ package_name as pkg
+ from add_package_name!(with_windows)
)
select ts, sum(value) as value, cluster, pkg
from with_ratio
@@ -1075,23 +1089,12 @@
where tx_bytes >=0 and rx_bytes >=0
group by 1,2,3
having tx_bytes > 0 or rx_bytes > 0
- ),
- app_package_list as (
- select
- uid,
- group_concat(package_name) as package_name
- from package_list
- where uid >= 10000
- group by 1
)
select
ts,
dur,
- case
- when pl.package_name is null then 'uid=' || uid
- else pl.package_name
- end || ' TX ' || tx_bytes || ' bytes / RX ' || rx_bytes || ' bytes' as name
- from step3 left join app_package_list pl using(uid)
+ format("%s: TX %d bytes / RX %d bytes", package_name, tx_bytes, rx_bytes) as name
+ from add_package_name!(step3)
`;
// See go/bt_system_context_report for reference on the bit-twiddling.
@@ -1266,11 +1269,12 @@
ts - 60000000000 as ts,
safe_dur + 60000000000 as dur,
str_value AS name,
- ifnull(
- (select package_name from package_list where uid = int_value % 100000),
- "uid="||int_value) as package
- FROM android_battery_stats_event_slices
- WHERE track_name = "battery_stats.longwake"`,
+ package_name as package
+ FROM add_package_name!((
+ select *, int_value as uid
+ from android_battery_stats_event_slices
+ WHERE track_name = "battery_stats.longwake"
+ ))`,
undefined,
['package'],
);
@@ -1806,8 +1810,9 @@
const containedTraces = (ctx.openerPluginArgs?.containedTraces ??
[]) as ContainedTrace[];
+ await ctx.engine.query(PACKAGE_LOOKUP);
await this.addNetworkSummary(ctx, features),
- await this.addModemDetail(ctx, features);
+ await this.addModemDetail(ctx, features);
await this.addKernelWakelocks(ctx, features);
await this.addWakeups(ctx, features);
await this.addDeviceState(ctx, features);
diff --git a/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/OWNERS b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/OWNERS
new file mode 100644
index 0000000..b655639
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/OWNERS
@@ -0,0 +1,4 @@
+paulsoumyadeep@google.com
+nishantpanwar@google.com
+bvineeth@google.com
+nicomazz@google.com
\ No newline at end of file
diff --git a/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/handlerRegistry.ts b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/handlerRegistry.ts
new file mode 100644
index 0000000..f6af47e
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/handlerRegistry.ts
@@ -0,0 +1,18 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {MetricHandler} from './metricUtils';
+
+// TODO: b/337774166 - Add handlers for the metric name categories here
+export const METRIC_HANDLERS: MetricHandler[] = [];
diff --git a/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/metricUtils.ts b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/metricUtils.ts
new file mode 100644
index 0000000..a1380f7
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/handlers/metricUtils.ts
@@ -0,0 +1,57 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {PluginContextTrace} from '../../../public';
+
+// TODO: b/337774166 - Perfetto FT handler MetricData
+export interface FullTraceMetricData {}
+
+// TODO: b/337774166 - Perfetto CUJ handler MetricData
+export interface CujScopedMetricData {}
+
+// TODO: b/337774166 - Blocking Call handler MetricData
+export interface BlockingCallMetricData {}
+
+// Common MetricData for all handlers. If a new one is needed, add it here.
+export type MetricData =
+ | FullTraceMetricData
+ | CujScopedMetricData
+ | BlockingCallMetricData;
+
+/**
+ * Common interface for debug track handlers
+ */
+export interface MetricHandler {
+ /**
+ * Match metric key & return parsed data if successful.
+ *
+ * @param {string} metricKey The metric key to match.
+ * @returns {MetricData | undefined} Parsed data or undefined if no match.
+ */
+ match(metricKey: string): MetricData | undefined;
+
+ /**
+ * Add debug track for parsed metric data.
+ *
+ * @param {MetricData} metricData The parsed metric data.
+ * @param {PluginContextTrace} ctx The plugin context.
+   * @param {string} type 'static' to register on trace load, 'debug' on command.
+ * @returns {void}
+ */
+ addDebugTrack(
+ metricData: MetricData,
+ ctx: PluginContextTrace,
+ type: 'static' | 'debug',
+ ): void;
+}
diff --git a/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/index.ts b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/index.ts
new file mode 100644
index 0000000..fb15e2e
--- /dev/null
+++ b/ui/src/plugins/dev.perfetto.PinAndroidPerfMetrics/index.ts
@@ -0,0 +1,88 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {Plugin, PluginContextTrace, PluginDescriptor} from '../../public';
+import {METRIC_HANDLERS} from './handlers/handlerRegistry';
+
+const PLUGIN_ID = 'dev.perfetto.PinAndroidPerfMetrics';
+
+/**
+ * Plugin that adds and pins the debug track for the metric passed
+ * For more context -
+ * This plugin reads the names of regressed metrics from the url upon loading
+ * It then checks the metric names against some handlers and if they
+ * match it accordingly adds the debug tracks for them
+ * This way when comparing two different perfetto traces before and after
+ * the regression, the user will not have to manually search for the
+ * slices related to the regressed metric
+ */
+class PinAndroidPerfMetrics implements Plugin {
+ private metrics: string[] = [];
+
+ onActivate(): void {
+ this.metrics = this.getMetricsFromHash();
+ }
+
+ async onTraceLoad(ctx: PluginContextTrace) {
+ ctx.registerCommand({
+ id: 'dev.perfetto.PinAndroidPerfMetrics#PinAndroidPerfMetrics',
+ name: 'Add and Pin: Jank Metric Slice',
+ callback: async (metric) => {
+      metric = prompt('Metrics names (separated by comma)', '');
+ if (metric === null) return;
+ const metricList = metric.split(',');
+ this.callHandlers(metricList, ctx, 'debug');
+ },
+ });
+ if (this.metrics.length !== 0) {
+ this.callHandlers(this.metrics, ctx, 'static');
+ }
+ }
+
+ private callHandlers(
+ metricsList: string[],
+ ctx: PluginContextTrace,
+ type: 'static' | 'debug',
+ ) {
+ for (const metric of metricsList) {
+ for (const metricHandler of METRIC_HANDLERS) {
+ const match = metricHandler.match(metric);
+ if (match) {
+ metricHandler.addDebugTrack(match, ctx, type);
+ break;
+ }
+ }
+ }
+ }
+
+ private getMetricsFromHash(): string[] {
+ const metricVal = location.hash;
+ const regex = new RegExp(`${PLUGIN_ID}:metrics=(.*)`);
+ const match = metricVal.match(regex);
+ if (match === null) {
+ return [];
+ }
+ const capturedString = match[1];
+ if (capturedString.includes('--')) {
+ return capturedString.split('--');
+ } else {
+ return [capturedString];
+ }
+ }
+}
+
+export const plugin: PluginDescriptor = {
+ pluginId: PLUGIN_ID,
+ plugin: PinAndroidPerfMetrics,
+};