Merge "processor: Some more fixes for chromium json export"
diff --git a/gn/perfetto.gni b/gn/perfetto.gni
index a8492e5..0e96e07 100644
--- a/gn/perfetto.gni
+++ b/gn/perfetto.gni
@@ -205,10 +205,6 @@
perfetto_build_standalone && enable_perfetto_trace_processor &&
(is_linux || is_android || is_mac)
- # Enables metrics support in the trace processor.
- enable_perfetto_trace_processor_metrics =
- enable_perfetto_trace_processor && !build_with_chromium
-
# Enables ftrace support in the trace processor.
enable_perfetto_trace_processor_ftrace =
enable_perfetto_trace_processor && !build_with_chromium
@@ -257,6 +253,10 @@
enable_perfetto_trace_processor_system_probes ||
enable_perfetto_trace_processor_android_probes
+ # Enables metrics support in the trace processor, which requires SQLite support.
+ enable_perfetto_trace_processor_metrics =
+ enable_perfetto_trace_processor_sqlite
+
# Enables the trace_to_text tool.
enable_perfetto_tools_trace_to_text =
enable_perfetto_tools && enable_perfetto_trace_processor_sqlite
@@ -302,5 +302,15 @@
assert(perfetto_force_dlog == "" || perfetto_force_dlog == "on" ||
perfetto_force_dlog == "off")
-# The UI requires SQLite support in trace processor.
+# Metrics, the UI, and trace_to_text require SQLite support in trace processor.
+assert(!enable_perfetto_trace_processor_metrics ||
+ enable_perfetto_trace_processor_sqlite)
+assert(!enable_perfetto_tools_trace_to_text ||
+ enable_perfetto_trace_processor_sqlite)
assert(!enable_perfetto_ui || enable_perfetto_trace_processor_sqlite)
+
+# Syscall support is required for ftrace, system probes, and android probes.
+assert(enable_perfetto_trace_processor_syscalls ||
+ (!enable_perfetto_trace_processor_ftrace &&
+ !enable_perfetto_trace_processor_system_probes &&
+ !enable_perfetto_trace_processor_android_probes))
diff --git a/src/trace_processor/export_json.cc b/src/trace_processor/export_json.cc
index b66206c..15ad611 100644
--- a/src/trace_processor/export_json.cc
+++ b/src/trace_processor/export_json.cc
@@ -44,6 +44,7 @@
using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;
const char kLegacyEventArgsKey[] = "legacy_event";
+const char kLegacyEventOriginalTidKey[] = "original_tid";
const char kLegacyEventCategoryKey[] = "category";
const char kLegacyEventNameKey[] = "name";
const char kLegacyEventPhaseKey[] = "phase";
@@ -106,10 +107,11 @@
if (label_filter_ && !label_filter_("traceEvents"))
return;
- if (!first_event_) {
- output_->AppendString(",");
- }
+ if (!first_event_)
+ output_->AppendString(",\n");
+
Json::FastWriter writer;
+ writer.omitEndingLineFeed();
ArgumentNameFilterPredicate argument_name_filter;
bool strip_args =
@@ -141,17 +143,18 @@
if (label_filter_ && !label_filter_("traceEvents"))
return;
- if (!first_event_) {
- output_->AppendString(",");
- }
+ if (!first_event_)
+ output_->AppendString(",\n");
+
Json::FastWriter writer;
+ writer.omitEndingLineFeed();
Json::Value value;
value["ph"] = "M";
value["cat"] = "__metadata";
value["ts"] = 0;
value["name"] = metadata_type;
- value["tid"] = Json::UInt(tid);
- value["pid"] = Json::UInt(pid);
+ value["tid"] = static_cast<int32_t>(tid);
+ value["pid"] = static_cast<int32_t>(pid);
Json::Value args;
args["name"] = metadata_value;
@@ -220,6 +223,7 @@
}
Json::FastWriter writer;
+ writer.omitEndingLineFeed();
if ((!label_filter_ || label_filter_("traceEvents")) &&
!user_trace_data_.empty()) {
user_trace_data_ += "]";
@@ -269,14 +273,14 @@
class ArgsBuilder {
public:
- explicit ArgsBuilder(const TraceStorage* storage) : storage_(storage) {
+ explicit ArgsBuilder(const TraceStorage* storage)
+ : storage_(storage), empty_value_(Json::objectValue) {
const TraceStorage::Args& args = storage->args();
- Json::Value empty_value(Json::objectValue);
if (args.args_count() == 0) {
- args_sets_.resize(1, empty_value);
+ args_sets_.resize(1, empty_value_);
return;
}
- args_sets_.resize(args.set_ids().back() + 1, empty_value);
+ args_sets_.resize(args.set_ids().back() + 1, empty_value_);
for (size_t i = 0; i < args.args_count(); ++i) {
ArgSetId set_id = args.set_ids()[i];
const char* key = GetNonNullString(storage_, args.keys()[i]);
@@ -287,6 +291,10 @@
}
const Json::Value& GetArgs(ArgSetId set_id) const {
+ // If |set_id| was empty and added to the storage last, it may not be in
+ // args_sets_.
+ if (set_id >= args_sets_.size())
+ return empty_value_;
return args_sets_[set_id];
}
@@ -359,22 +367,19 @@
args["src"] = posted_from["file_name"];
}
}
- if (args["task"].empty()) {
+ if (args["task"].empty())
args.removeMember("task");
- }
}
}
}
const TraceStorage* storage_;
std::vector<Json::Value> args_sets_;
+ Json::Value empty_value_;
};
void ConvertLegacyFlowEventArgs(const Json::Value& legacy_args,
Json::Value* event) {
- if (legacy_args.isMember(kLegacyEventIdScopeKey))
- (*event)["scope"] = legacy_args[kLegacyEventIdScopeKey];
-
if (legacy_args.isMember(kLegacyEventBindIdKey)) {
(*event)["bind_id"] =
PrintUint64(legacy_args[kLegacyEventBindIdKey].asUInt64());
@@ -432,12 +437,22 @@
event["cat"] = GetNonNullString(storage, slices.categories()[i]);
event["name"] = GetNonNullString(storage, slices.names()[i]);
event["pid"] = 0;
+ event["tid"] = 0;
+
+ int32_t legacy_tid = 0;
event["args"] =
args_builder.GetArgs(slices.arg_set_ids()[i]); // Makes a copy.
if (event["args"].isMember(kLegacyEventArgsKey)) {
ConvertLegacyFlowEventArgs(event["args"][kLegacyEventArgsKey], &event);
+ if (event["args"][kLegacyEventArgsKey].isMember(
+ kLegacyEventOriginalTidKey)) {
+ legacy_tid = static_cast<int32_t>(
+ event["args"][kLegacyEventArgsKey][kLegacyEventOriginalTidKey]
+ .asInt());
+ }
+
event["args"].removeMember(kLegacyEventArgsKey);
}
@@ -500,19 +515,21 @@
// Synchronous (thread) slice or instant event.
UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
auto thread = storage->GetThread(utid);
+ event["tid"] = static_cast<int32_t>(thread.tid);
+ if (thread.upid) {
+ event["pid"] =
+ static_cast<int32_t>(storage->GetProcess(*thread.upid).pid);
+ }
if (duration_ns == 0) {
- event["ph"] = "i";
- if (thread.upid) {
- event["pid"] = storage->GetProcess(*thread.upid).pid;
- }
+ // Use "I" instead of "i" phase for backwards-compat with old consumers.
+ event["ph"] = "I";
if (thread_ts_ns > 0) {
event["tts"] = Json::Int64(thread_ts_ns / 1000);
}
if (thread_instruction_count > 0) {
event["ticount"] = Json::Int64(thread_instruction_count);
}
- event["tid"] = thread.tid;
event["s"] = "t";
} else {
if (duration_ns > 0) {
@@ -523,10 +540,6 @@
// write a begin event without end event in this case.
event["ph"] = "B";
}
- event["tid"] = thread.tid;
- if (thread.upid) {
- event["pid"] = storage->GetProcess(*thread.upid).pid;
- }
if (thread_ts_ns > 0) {
event["tts"] = Json::Int64(thread_ts_ns / 1000);
// Only write thread duration for completed events.
@@ -550,7 +563,10 @@
// Legacy async tracks are always process-associated.
PERFETTO_DCHECK(opt_process_row);
uint32_t upid = process_track.upid()[*opt_process_row];
- event["pid"] = storage->GetProcess(upid).pid;
+ event["pid"] = static_cast<int32_t>(storage->GetProcess(upid).pid);
+ event["tid"] =
+ legacy_tid ? legacy_tid
+ : static_cast<int32_t>(storage->GetProcess(upid).pid);
// Preserve original event IDs for legacy tracks. This is so that e.g.
// memory dump IDs show up correctly in the JSON trace.
@@ -560,20 +576,33 @@
uint64_t source_id =
static_cast<uint64_t>(track_args["source_id"].asInt64());
std::string source_scope = track_args["source_scope"].asString();
+ if (!source_scope.empty())
+ event["scope"] = source_scope;
bool source_id_is_process_scoped =
track_args["source_id_is_process_scoped"].asBool();
if (source_id_is_process_scoped) {
event["id2"]["local"] = PrintUint64(source_id);
} else {
- event["id2"]["global"] = PrintUint64(source_id);
+ // Some legacy importers don't understand "id2" fields, so we use the
+ // "usually" global "id" field instead. This works as long as the
+ // event phase is not in {'N', 'D', 'O', '(', ')'}, see
+ // "LOCAL_ID_PHASES" in catapult.
+ event["id"] = PrintUint64(source_id);
}
} else {
if (opt_process_row) {
uint32_t upid = process_track.upid()[*opt_process_row];
event["id2"]["local"] = PrintUint64(track_id);
- event["pid"] = storage->GetProcess(upid).pid;
+ event["pid"] = static_cast<int32_t>(storage->GetProcess(upid).pid);
+ event["tid"] =
+ legacy_tid ? legacy_tid
+ : static_cast<int32_t>(storage->GetProcess(upid).pid);
} else {
- event["id2"]["global"] = PrintUint64(track_id);
+ // Some legacy importers don't understand "id2" fields, so we use the
+ // "usually" global "id" field instead. This works as long as the
+ // event phase is not in {'N', 'D', 'O', '(', ')'}, see
+ // "LOCAL_ID_PHASES" in catapult.
+ event["id"] = PrintUint64(track_id);
}
}
@@ -606,20 +635,24 @@
event["ticount"] = Json::Int64(
(thread_instruction_count + thread_instruction_delta));
}
- event.removeMember("args");
+ event["args"].clear();
writer->WriteCommonEvent(event);
}
}
} else {
// Global or process-scoped instant event.
PERFETTO_DCHECK(duration_ns == 0);
- event["ph"] = "i";
+ // Use "I" instead of "i" phase for backwards-compat with old consumers.
+ event["ph"] = "I";
auto opt_process_row =
process_track.id().IndexOf(SqlValue::Long(track_id));
if (opt_process_row.has_value()) {
uint32_t upid = process_track.upid()[*opt_process_row];
- event["pid"] = storage->GetProcess(upid).pid;
+ event["pid"] = static_cast<int32_t>(storage->GetProcess(upid).pid);
+ event["tid"] =
+ legacy_tid ? legacy_tid
+ : static_cast<int32_t>(storage->GetProcess(upid).pid);
event["s"] = "p";
} else {
event["s"] = "g";
@@ -640,10 +673,10 @@
UniqueTid utid = static_cast<UniqueTid>(events.utids()[index]);
auto thread = storage->GetThread(utid);
- event["tid"] = thread.tid;
- if (thread.upid) {
- event["pid"] = storage->GetProcess(*thread.upid).pid;
- }
+ event["tid"] = static_cast<int32_t>(thread.tid);
+ event["pid"] = 0;
+ if (thread.upid)
+ event["pid"] = static_cast<int32_t>(storage->GetProcess(*thread.upid).pid);
// Raw legacy events store all other params in the arg set. Make a copy of
// the converted args here, parse, and then remove the legacy params.
@@ -659,6 +692,13 @@
PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
event["ph"] = legacy_args[kLegacyEventPhaseKey];
+ // Object snapshot events are supposed to have a mandatory "snapshot" arg,
+ // which may be removed in trace processor if it is empty.
+ if (legacy_args[kLegacyEventPhaseKey] == "O" &&
+ !event["args"].isMember("snapshot")) {
+ event["args"]["snapshot"] = Json::Value(Json::objectValue);
+ }
+
if (legacy_args.isMember(kLegacyEventDurationNsKey))
event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;
@@ -696,6 +736,9 @@
PrintUint64(legacy_args[kLegacyEventLocalIdKey].asUInt64());
}
+ if (legacy_args.isMember(kLegacyEventIdScopeKey))
+ event["scope"] = legacy_args[kLegacyEventIdScopeKey];
+
ConvertLegacyFlowEventArgs(legacy_args, &event);
event["args"].removeMember(kLegacyEventArgsKey);
@@ -750,14 +793,17 @@
UniqueTid utid = static_cast<UniqueTid>(samples.utids()[i]);
auto thread = storage->GetThread(utid);
- event["tid"] = thread.tid;
- if (thread.upid)
- event["pid"] = storage->GetProcess(*thread.upid).pid;
+ event["tid"] = static_cast<int32_t>(thread.tid);
+ if (thread.upid) {
+ event["pid"] =
+ static_cast<int32_t>(storage->GetProcess(*thread.upid).pid);
+ }
+ // Use "I" instead of "i" phase for backwards-compat with old consumers.
event["ph"] = "I";
event["cat"] = "disabled_by_default-cpu_profiler";
event["name"] = "StackCpuSampling";
- event["scope"] = "t";
+ event["s"] = "t";
std::vector<std::string> callstack;
const auto& callsites = storage->stack_profile_callsite_table();
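Illustrative sketch (not part of the patch): the export_json.cc changes above lean on three small jsoncpp techniques — events are separated with ",\n", Json::FastWriter::omitEndingLineFeed() suppresses the writer's trailing newline so each serialized event stays on one line with the comma at the end of the previous line, and pid/tid are written as signed 32-bit integers. A minimal standalone program using the same pattern could look as follows; the event fields are invented for illustration and none of this code is Perfetto's.

#include <cstdint>
#include <iostream>
#include <vector>

#include <json/json.h>  // jsoncpp

int main() {
  // Build a few fake instant events; all field values are illustrative.
  std::vector<Json::Value> events;
  for (int64_t i = 0; i < 3; ++i) {
    Json::Value event;
    event["ph"] = "I";                        // instant phase, as in the export
    event["ts"] = Json::Int64(1000 * i);      // timestamp in microseconds
    event["pid"] = static_cast<int32_t>(42);  // signed ints, matching the patch
    event["tid"] = static_cast<int32_t>(42);
    event["s"] = "g";                         // global scope
    events.push_back(event);
  }

  Json::FastWriter writer;
  writer.omitEndingLineFeed();  // FastWriter otherwise appends '\n' per write()

  std::cout << "{\"traceEvents\":[\n";
  bool first = true;
  for (const Json::Value& event : events) {
    if (!first)
      std::cout << ",\n";  // same separator the exporter now appends
    first = false;
    std::cout << writer.write(event);
  }
  std::cout << "\n]}\n";
  return 0;
}

The int32_t casts mirror the patch's switch away from Json::UInt for pid/tid values.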
diff --git a/src/trace_processor/export_json_unittest.cc b/src/trace_processor/export_json_unittest.cc
index 679ef22..c5c949a 100644
--- a/src/trace_processor/export_json_unittest.cc
+++ b/src/trace_processor/export_json_unittest.cc
@@ -737,7 +737,7 @@
EXPECT_EQ(result["traceEvents"].size(), 1u);
Json::Value event = result["traceEvents"][0];
- EXPECT_EQ(event["ph"].asString(), "i");
+ EXPECT_EQ(event["ph"].asString(), "I");
EXPECT_EQ(event["ts"].asInt64(), kTimestamp / 1000);
EXPECT_EQ(event["s"].asString(), "g");
EXPECT_EQ(event["cat"].asString(), kCategory);
@@ -770,7 +770,7 @@
Json::Value event = result["traceEvents"][0];
EXPECT_EQ(event["tid"].asUInt(), kThreadID);
- EXPECT_EQ(event["ph"].asString(), "i");
+ EXPECT_EQ(event["ph"].asString(), "I");
EXPECT_EQ(event["ts"].asInt64(), kTimestamp / 1000);
EXPECT_EQ(event["s"].asString(), "t");
EXPECT_EQ(event["cat"].asString(), kCategory);
@@ -835,7 +835,8 @@
EXPECT_EQ(end_event["id2"]["local"].asString(), "0xeb");
EXPECT_EQ(end_event["cat"].asString(), kCategory);
EXPECT_EQ(end_event["name"].asString(), kName);
- EXPECT_FALSE(end_event.isMember("args"));
+ EXPECT_TRUE(end_event["args"].isObject());
+ EXPECT_EQ(end_event["args"].size(), 0u);
EXPECT_FALSE(end_event.isMember("tts"));
EXPECT_FALSE(end_event.isMember("use_async_tts"));
}
@@ -1193,7 +1194,7 @@
EXPECT_EQ(event["tid"].asUInt(), kThreadID);
EXPECT_EQ(event["cat"].asString(), "disabled_by_default-cpu_profiler");
EXPECT_EQ(event["name"].asString(), "StackCpuSampling");
- EXPECT_EQ(event["scope"].asString(), "t");
+ EXPECT_EQ(event["s"].asString(), "t");
EXPECT_EQ(event["args"]["frames"].asString(),
"foo_func - foo_module_name [foo_module_id]\nbar_func - "
"bar_module_name [bar_module_id]\n");
diff --git a/src/trace_processor/importers/proto/proto_trace_parser_unittest.cc b/src/trace_processor/importers/proto/proto_trace_parser_unittest.cc
index 8d029f5..f53c951 100644
--- a/src/trace_processor/importers/proto/proto_trace_parser_unittest.cc
+++ b/src/trace_processor/importers/proto/proto_trace_parser_unittest.cc
@@ -1048,7 +1048,6 @@
TraceStorage::Thread thread(16);
thread.upid = 2u;
EXPECT_CALL(*storage_, GetThread(1))
- .Times(5)
.WillRepeatedly(testing::ReturnRef(thread));
MockArgsTracker args(&context_);
@@ -1068,12 +1067,11 @@
InSequence in_sequence; // Below slices should be sorted by timestamp.
RowId first_slice_row_id =
- TraceStorage::CreateRowId(TableId::kNestableSlices, 3u);
+ TraceStorage::CreateRowId(TableId::kNestableSlices, 0u);
EXPECT_CALL(*slice_, Scoped(1005000, thread_1_track, 1, RefType::kRefUtid,
StringId(1), StringId(2), 23000, _))
.WillOnce(
DoAll(InvokeArgument<7>(&args, first_slice_row_id), Return(0u)));
-
EXPECT_CALL(
args, AddArg(first_slice_row_id, _, _, Variadic::UnsignedInteger(9999u)));
EXPECT_CALL(args, AddArg(first_slice_row_id, _, _, Variadic::Boolean(true)));
@@ -1100,12 +1098,13 @@
&args, TraceStorage::CreateRowId(TableId::kNestableSlices, 2u)),
Return(2u)));
+ RowId last_slice_row_id =
+ TraceStorage::CreateRowId(TableId::kNestableSlices, 3u);
EXPECT_CALL(*slice_, Scoped(1050000, process_2_track, 2, RefType::kRefUpid,
StringId(3), StringId(4), 0, _))
- .WillOnce(DoAll(
- InvokeArgument<7>(
- &args, TraceStorage::CreateRowId(TableId::kNestableSlices, 3u)),
- Return(3u)));
+ .WillOnce(DoAll(InvokeArgument<7>(&args, last_slice_row_id), Return(3u)));
+ // Second slice should have a legacy_event.original_tid arg.
+ EXPECT_CALL(args, AddArg(last_slice_row_id, _, _, Variadic::Integer(16)));
context_.sorter->ExtractEventsForced();
diff --git a/src/trace_processor/importers/proto/track_event_parser.cc b/src/trace_processor/importers/proto/track_event_parser.cc
index 166d090..721775c 100644
--- a/src/trace_processor/importers/proto/track_event_parser.cc
+++ b/src/trace_processor/importers/proto/track_event_parser.cc
@@ -55,6 +55,8 @@
context->storage->InternString("track_event.log_message")),
raw_legacy_event_id_(
context->storage->InternString("track_event.legacy_event")),
+ legacy_event_original_tid_id_(
+ context->storage->InternString("legacy_event.original_tid")),
legacy_event_category_key_id_(
context->storage->InternString("legacy_event.category")),
legacy_event_name_key_id_(
@@ -233,6 +235,12 @@
track_id = track_tracker->GetOrCreateDefaultDescriptorTrack();
}
+ // All events in legacy JSON require a thread ID, but for some types of events
+ // (e.g. async events or process/global-scoped instants), we don't store it in
+ // the slice/track model. To pass the original tid through to the json export,
+ // we store it in an arg.
+ uint32_t legacy_tid = 0;
+
// TODO(eseckler): Replace phase with type and remove handling of
// legacy_event.phase() once it is no longer used by producers.
int32_t phase = 0;
@@ -278,6 +286,8 @@
track_id = context_->track_tracker->InternLegacyChromeAsyncTrack(
name_id, upid ? *upid : 0, source_id, source_id_is_process_scoped,
id_scope);
+ if (utid)
+ legacy_tid = storage->GetThread(*utid).tid;
break;
}
case 'i':
@@ -298,6 +308,8 @@
case LegacyEvent::SCOPE_GLOBAL:
track_id = context_->track_tracker
->GetOrCreateLegacyChromeGlobalInstantTrack();
+ if (utid)
+ legacy_tid = storage->GetThread(*utid).tid;
break;
case LegacyEvent::SCOPE_PROCESS:
if (!upid) {
@@ -310,6 +322,8 @@
track_id =
context_->track_tracker->InternLegacyChromeProcessInstantTrack(
*upid);
+ if (utid)
+ legacy_tid = storage->GetThread(*utid).tid;
break;
}
break;
@@ -335,8 +349,8 @@
}
auto args_callback = [this, &event, &legacy_event, &sequence_state,
- sequence_state_generation, ts,
- utid](ArgsTracker* args_tracker, RowId row_id) {
+ sequence_state_generation, ts, utid,
+ legacy_tid](ArgsTracker* args_tracker, RowId row_id) {
for (auto it = event.debug_annotations(); it; ++it) {
ParseDebugAnnotationArgs(*it, sequence_state, sequence_state_generation,
args_tracker, row_id);
@@ -353,6 +367,12 @@
row_id);
}
+ if (legacy_tid) {
+ args_tracker->AddArg(row_id, legacy_event_original_tid_id_,
+ legacy_event_original_tid_id_,
+ Variadic::Integer(static_cast<int32_t>(legacy_tid)));
+ }
+
// TODO(eseckler): Parse legacy flow events into flow events table once we
// have a design for it.
if (legacy_event.has_bind_id()) {
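Illustrative sketch (not part of the patch): the parser change above records the original thread id under the legacy_event.original_tid arg whenever an event is routed to an async, global-instant, or process-instant track, and export_json.cc reads it back, falling back to the process pid when the arg is missing. A hypothetical helper showing that read-back with jsoncpp (ResolveLegacyTid is invented for illustration; only the two key constants come from the patch):

#include <cstdint>
#include <iostream>

#include <json/json.h>  // jsoncpp

namespace {
// These two key names come from the patch; everything else is illustrative.
constexpr char kLegacyEventArgsKey[] = "legacy_event";
constexpr char kLegacyEventOriginalTidKey[] = "original_tid";

// Hypothetical helper: choose the tid to emit for a process-associated event,
// preferring the original tid recorded by the parser and falling back to the
// process pid, mirroring the "legacy_tid ? legacy_tid : pid" pattern above.
int32_t ResolveLegacyTid(const Json::Value& args, int32_t pid) {
  if (args.isMember(kLegacyEventArgsKey) &&
      args[kLegacyEventArgsKey].isMember(kLegacyEventOriginalTidKey)) {
    return static_cast<int32_t>(
        args[kLegacyEventArgsKey][kLegacyEventOriginalTidKey].asInt());
  }
  return pid;
}
}  // namespace

int main() {
  Json::Value args;
  args[kLegacyEventArgsKey][kLegacyEventOriginalTidKey] = 1234;
  std::cout << ResolveLegacyTid(args, 42) << "\n";                            // 1234
  std::cout << ResolveLegacyTid(Json::Value(Json::objectValue), 42) << "\n";  // 42
  return 0;
}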
diff --git a/src/trace_processor/importers/proto/track_event_parser.h b/src/trace_processor/importers/proto/track_event_parser.h
index 6579fbe..0aa072d 100644
--- a/src/trace_processor/importers/proto/track_event_parser.h
+++ b/src/trace_processor/importers/proto/track_event_parser.h
@@ -84,6 +84,7 @@
const StringId task_line_number_args_key_id_;
const StringId log_message_body_key_id_;
const StringId raw_legacy_event_id_;
+ const StringId legacy_event_original_tid_id_;
const StringId legacy_event_category_key_id_;
const StringId legacy_event_name_key_id_;
const StringId legacy_event_phase_key_id_;