Merge "ui: Fix drop table crash" into main
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.cc b/src/trace_processor/importers/proto/heap_graph_tracker.cc
index 02c3ae2..f8971c6 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.cc
@@ -755,28 +755,34 @@
StringId kind = cls_row_ref.kind();
- if (kind == InternTypeKindString(
+ bool is_ignored_reference =
+ kind == InternTypeKindString(
protos::pbzero::HeapGraphType::KIND_WEAK_REFERENCE) ||
kind == InternTypeKindString(
protos::pbzero::HeapGraphType::KIND_SOFT_REFERENCE) ||
kind == InternTypeKindString(
protos::pbzero::HeapGraphType::KIND_FINALIZER_REFERENCE) ||
kind == InternTypeKindString(
- protos::pbzero::HeapGraphType::KIND_PHANTOM_REFERENCE)) {
- // Do not follow weak / soft / finalizer / phantom references.
- return {};
- }
+ protos::pbzero::HeapGraphType::KIND_PHANTOM_REFERENCE);
base::FlatSet<ObjectTable::Id> children;
- ForReferenceSet(storage_, object,
- [object, &children](ReferenceTable::RowReference ref) {
- PERFETTO_CHECK(ref.owner_id() == object.id());
- auto opt_owned = ref.owned_id();
- if (opt_owned) {
- children.insert(*opt_owned);
- }
- return true;
- });
+ ForReferenceSet(
+ storage_, object,
+ [object, &children, is_ignored_reference,
+ this](ReferenceTable::RowReference ref) {
+ PERFETTO_CHECK(ref.owner_id() == object.id());
+ auto opt_owned = ref.owned_id();
+ if (!opt_owned) {
+ return true;
+ }
+ if (is_ignored_reference && ref.field_name() == referent_str_id_) {
+ // If `object` is a special reference kind, its
+ // "java.lang.ref.Reference.referent" field should be ignored.
+ return true;
+ }
+ children.insert(*opt_owned);
+ return true;
+ });
return children;
}
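
For illustration, a minimal, self-contained sketch of the new rule (the Reference struct and FilterChildren helper here are hypothetical, not the tracker's real row types). Instead of returning no children at all for weak/soft/finalizer/phantom reference objects, only the edge named java.lang.ref.Reference.referent is skipped; every other outgoing reference is still followed.

#include <cstdint>
#include <set>
#include <string>
#include <vector>

// Hypothetical stand-ins for the tracker's reference rows.
struct Reference {
  uint64_t owned_id;       // object the reference points to
  std::string field_name;  // e.g. "java.lang.ref.Reference.referent"
};

// For weak/soft/finalizer/phantom reference objects, skip only the referent
// edge; keep every other outgoing reference. (Previously such objects
// contributed no children at all.)
std::set<uint64_t> FilterChildren(const std::vector<Reference>& refs,
                                  bool is_ignored_reference) {
  std::set<uint64_t> children;
  for (const Reference& ref : refs) {
    if (is_ignored_reference &&
        ref.field_name == "java.lang.ref.Reference.referent") {
      continue;
    }
    children.insert(ref.owned_id);
  }
  return children;
}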
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
index 6ae4f09..8e9ee32 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
@@ -215,14 +215,12 @@
constexpr uint64_t kY = 2;
constexpr uint64_t kA = 3;
constexpr uint64_t kB = 4;
- constexpr uint64_t kWeakRef = 5;
base::StringView field = base::StringView("foo");
StringPool::Id x = context.storage->InternString("X");
StringPool::Id y = context.storage->InternString("Y");
StringPool::Id a = context.storage->InternString("A");
StringPool::Id b = context.storage->InternString("B");
- StringPool::Id weak_ref = context.storage->InternString("WeakReference");
tracker.AddInternedFieldName(kSeqId, kField, field);
@@ -244,22 +242,6 @@
/*field_name_ids=*/{}, /*superclass_id=*/0,
/*classloader_id=*/0, /*no_fields=*/false,
protos::pbzero::HeapGraphType::KIND_NORMAL);
- tracker.AddInternedType(kSeqId, kWeakRef, weak_ref, kLocation,
- /*object_size=*/0,
- /*field_name_ids=*/{}, /*superclass_id=*/0,
- /*classloader_id=*/0, /*no_fields=*/false,
- protos::pbzero::HeapGraphType::KIND_WEAK_REFERENCE);
- {
- HeapGraphTracker::SourceObject obj;
- obj.object_id = 999;
- obj.self_size = 999;
- obj.type_id = kWeakRef;
- obj.field_name_ids = {kField};
- obj.referred_objects = {5};
-
- tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
- }
-
{
HeapGraphTracker::SourceObject obj;
obj.object_id = 1;
@@ -309,7 +291,6 @@
HeapGraphTracker::SourceRoot root;
root.root_type = protos::pbzero::HeapGraphRoot::ROOT_UNKNOWN;
root.object_ids.emplace_back(1);
- root.object_ids.emplace_back(999);
tracker.AddRoot(kSeqId, kPid, kTimestamp, root);
tracker.FinalizeProfile(kSeqId);
@@ -318,16 +299,129 @@
ASSERT_NE(flame, nullptr);
auto cumulative_sizes = flame->cumulative_size().ToVectorForTesting();
- EXPECT_THAT(cumulative_sizes, UnorderedElementsAre(15, 4, 14, 5, 999));
+ EXPECT_THAT(cumulative_sizes, UnorderedElementsAre(15, 4, 14, 5));
auto cumulative_counts = flame->cumulative_count().ToVectorForTesting();
- EXPECT_THAT(cumulative_counts, UnorderedElementsAre(5, 4, 1, 1, 1));
+ EXPECT_THAT(cumulative_counts, UnorderedElementsAre(5, 4, 1, 1));
auto sizes = flame->size().ToVectorForTesting();
- EXPECT_THAT(sizes, UnorderedElementsAre(1, 5, 4, 5, 999));
+ EXPECT_THAT(sizes, UnorderedElementsAre(1, 5, 4, 5));
auto counts = flame->count().ToVectorForTesting();
- EXPECT_THAT(counts, UnorderedElementsAre(1, 2, 1, 1, 1));
+ EXPECT_THAT(counts, UnorderedElementsAre(1, 2, 1, 1));
+}
+
+TEST(HeapGraphTrackerTest, BuildFlamegraphWeakReferences) {
+ // Regression test for http://b.corp.google.com/issues/302662734:
+ // For weak (and other) references, we should not follow the
+ // `java.lang.ref.Reference.referent` field, but we should follow other
+ // fields.
+ //
+  //                                    2@A     4@B
+  //  (java.lang.ref.Reference.referent) \     / (X.other)
+  //                                       1@X (extends WeakReference)
+
+ constexpr uint64_t kSeqId = 1;
+ constexpr UniquePid kPid = 1;
+ constexpr int64_t kTimestamp = 1;
+
+ TraceProcessorContext context;
+ context.storage.reset(new TraceStorage());
+ context.process_tracker.reset(new ProcessTracker(&context));
+ context.process_tracker->GetOrCreateProcess(kPid);
+
+ HeapGraphTracker tracker(context.storage.get());
+
+ constexpr uint64_t kLocation = 0;
+
+ base::StringView referent_field =
+ base::StringView("java.lang.ref.Reference.referent");
+ constexpr uint64_t kReferentField = 1;
+ base::StringView other_field = base::StringView("X.other");
+ constexpr uint64_t kOtherField = 2;
+
+ constexpr uint64_t kX = 1;
+ StringPool::Id x = context.storage->InternString("X");
+ constexpr uint64_t kA = 2;
+ StringPool::Id a = context.storage->InternString("A");
+ constexpr uint64_t kB = 4;
+ StringPool::Id b = context.storage->InternString("B");
+ constexpr uint64_t kWeakRef = 5;
+ StringPool::Id weak_ref = context.storage->InternString("WeakReference");
+
+ tracker.AddInternedFieldName(kSeqId, kReferentField, referent_field);
+ tracker.AddInternedFieldName(kSeqId, kOtherField, other_field);
+
+ tracker.AddInternedLocationName(kSeqId, kLocation,
+ context.storage->InternString("location"));
+
+ tracker.AddInternedType(kSeqId, kWeakRef, weak_ref, kLocation,
+ /*object_size=*/0,
+ /*field_name_ids=*/{kReferentField},
+ /*superclass_id=*/0,
+ /*classloader_id=*/0, /*no_fields=*/false,
+ protos::pbzero::HeapGraphType::KIND_WEAK_REFERENCE);
+ tracker.AddInternedType(kSeqId, kX, x, kLocation,
+ /*object_size=*/0,
+ /*field_name_ids=*/{kOtherField},
+ /*superclass_id=*/kWeakRef,
+ /*classloader_id=*/0, /*no_fields=*/false,
+ protos::pbzero::HeapGraphType::KIND_WEAK_REFERENCE);
+ tracker.AddInternedType(kSeqId, kA, a, kLocation, /*object_size=*/0,
+ /*field_name_ids=*/{}, /*superclass_id=*/0,
+ /*classloader_id=*/0, /*no_fields=*/false,
+ protos::pbzero::HeapGraphType::KIND_NORMAL);
+ tracker.AddInternedType(kSeqId, kB, b, kLocation, /*object_size=*/0,
+ /*field_name_ids=*/{}, /*superclass_id=*/0,
+ /*classloader_id=*/0, /*no_fields=*/false,
+ protos::pbzero::HeapGraphType::KIND_NORMAL);
+ {
+ HeapGraphTracker::SourceObject obj;
+ obj.object_id = 1;
+ obj.self_size = 1;
+ obj.type_id = kX;
+ obj.referred_objects = {/*X.other*/ 4,
+ /*java.lang.ref.Reference.referent*/ 2};
+ tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+ }
+
+ {
+ HeapGraphTracker::SourceObject obj;
+ obj.object_id = 2;
+ obj.self_size = 2;
+ obj.type_id = kA;
+ tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+ }
+
+ {
+ HeapGraphTracker::SourceObject obj;
+ obj.object_id = 4;
+ obj.self_size = 4;
+ obj.type_id = kB;
+ tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+ }
+
+ HeapGraphTracker::SourceRoot root;
+ root.root_type = protos::pbzero::HeapGraphRoot::ROOT_UNKNOWN;
+ root.object_ids.emplace_back(1);
+ tracker.AddRoot(kSeqId, kPid, kTimestamp, root);
+
+ tracker.FinalizeProfile(kSeqId);
+ std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> flame =
+ tracker.BuildFlamegraph(kPid, kTimestamp);
+ ASSERT_NE(flame, nullptr);
+
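+  // Only the X.other edge is followed: the flamegraph contains 1@X (self
+  // size 1) and 4@B (self size 4). 2@A is reachable only through the ignored
+  // referent field, so it does not appear, and X's cumulative size is 1 + 4.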
+ auto cumulative_sizes = flame->cumulative_size().ToVectorForTesting();
+ EXPECT_THAT(cumulative_sizes, UnorderedElementsAre(4, 4 + 1));
+
+ auto cumulative_counts = flame->cumulative_count().ToVectorForTesting();
+ EXPECT_THAT(cumulative_counts, UnorderedElementsAre(1, 1 + 1));
+
+ auto sizes = flame->size().ToVectorForTesting();
+ EXPECT_THAT(sizes, UnorderedElementsAre(1, 4));
+
+ auto counts = flame->count().ToVectorForTesting();
+ EXPECT_THAT(counts, UnorderedElementsAre(1, 1));
}
static const char kArray[] = "X[]";
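
The expectations in the new test follow from a simple aggregation: each flamegraph node's cumulative size is its self size plus the cumulative sizes of the children that are actually followed. A small standalone sketch of that arithmetic, with a hypothetical Node type rather than the tracker's data model:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct Node {
  uint64_t self_size;
  std::vector<uint64_t> children;  // only the edges that survive the filter
};

uint64_t CumulativeSize(const std::map<uint64_t, Node>& graph, uint64_t id) {
  const Node& node = graph.at(id);
  uint64_t total = node.self_size;
  for (uint64_t child : node.children)
    total += CumulativeSize(graph, child);
  return total;
}

int main() {
  // 1@X (self size 1) keeps only the X.other edge to 4@B (self size 4);
  // the referent edge to 2@A is dropped, so A never contributes.
  std::map<uint64_t, Node> graph = {{1, {1, {4}}}, {4, {4, {}}}};
  std::cout << CumulativeSize(graph, 1) << "\n";  // 5, i.e. 4 + 1
  std::cout << CumulativeSize(graph, 4) << "\n";  // 4
}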
diff --git a/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql b/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
index ef4921a..b0f9310 100644
--- a/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
+++ b/src/trace_processor/perfetto_sql/stdlib/experimental/thread_executing_span.sql
@@ -440,19 +440,53 @@
SELECT ts, dur, id, slice_id, slice_depth, slice_name
FROM internal_span_graph_slice_sp;
--- |experimental_thread_executing_span_graph| + thread_state view span joined with critical_path information.
-CREATE VIRTUAL TABLE internal_critical_path_thread_state_sp
-USING
- SPAN_JOIN(
- internal_span_graph_thread_state PARTITIONED id,
- internal_critical_path PARTITIONED id);
+-- |experimental_thread_executing_span_graph| + thread_state view joined with critical_path information.
+CREATE PERFETTO TABLE internal_critical_path_thread_state AS
+WITH span AS MATERIALIZED (
+ SELECT * FROM internal_critical_path
+ ),
+ span_starts AS (
+ SELECT
+ span.id,
+ span.utid,
+ span.critical_path_id,
+ span.critical_path_blocked_dur,
+ span.critical_path_blocked_state,
+ span.critical_path_blocked_function,
+ span.critical_path_utid,
+ thread_state_id,
+ MAX(thread_state.ts, span.ts) AS ts,
+ span.ts + span.dur AS span_end_ts,
+ thread_state.ts + thread_state.dur AS thread_state_end_ts,
+ thread_state.state,
+ thread_state.function,
+ thread_state.cpu
+ FROM span
+ JOIN internal_span_graph_thread_state_sp thread_state USING(id)
+ )
+SELECT
+ id,
+ thread_state_id,
+ ts,
+ MIN(span_end_ts, thread_state_end_ts) - ts AS dur,
+ utid,
+ state,
+ function,
+ cpu,
+ critical_path_id,
+ critical_path_blocked_dur,
+ critical_path_blocked_state,
+ critical_path_blocked_function,
+ critical_path_utid
+FROM span_starts
+WHERE MIN(span_end_ts, thread_state_end_ts) - ts > 0;
-- |experimental_thread_executing_span_graph| + thread_state + critical_path span joined with
-- |experimental_thread_executing_span_graph| + slice view.
CREATE VIRTUAL TABLE internal_critical_path_sp
USING
SPAN_LEFT_JOIN(
- internal_critical_path_thread_state_sp PARTITIONED id,
+ internal_critical_path_thread_state PARTITIONED id,
internal_span_graph_slice PARTITIONED id);
-- Flattened slices span joined with their thread_states. This contains the 'self' information
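
The new internal_critical_path_thread_state table reproduces, for spans sharing the same id, what the removed SPAN_JOIN computed: the intersection of each critical_path span with the matching thread_state span, kept only when the overlap has positive duration. A minimal sketch of that intersection rule, using a hypothetical Span struct rather than the real table rows:

#include <algorithm>
#include <cstdint>
#include <optional>

// Hypothetical interval type; the real rows carry many more columns.
struct Span {
  int64_t ts;
  int64_t dur;
};

// Mirrors MAX(thread_state.ts, span.ts) and
// MIN(span_end_ts, thread_state_end_ts) - ts in the query above: returns the
// overlap of the two spans, or nothing when the overlap is not positive
// (the WHERE clause).
std::optional<Span> Intersect(const Span& a, const Span& b) {
  int64_t ts = std::max(a.ts, b.ts);
  int64_t end = std::min(a.ts + a.dur, b.ts + b.dur);
  if (end - ts <= 0)
    return std::nullopt;
  return Span{ts, end - ts};
}

MAX and MIN in the SQL are SQLite's two-argument scalar forms, so each joined row is an independent interval intersection like the one above.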