/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/profiling/pprof_builder.h"

#include "perfetto/base/build_config.h"

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <cxxabi.h>
#endif

#include <algorithm>
#include <cinttypes>
#include <map>
#include <set>
#include <unordered_map>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/hash.h"
#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/protozero/packed_repeated_fields.h"
#include "perfetto/protozero/scattered_heap_buffer.h"
#include "perfetto/trace_processor/trace_processor.h"
#include "src/trace_processor/containers/string_pool.h"
#include "src/traceconv/utils.h"

#include "protos/third_party/pprof/profile.pbzero.h"

// Quick hint on navigating the file:
// Conversions for both perf and heap profiles start with |TraceToPprof|.
// Non-shared logic is in the |heap_profile| and |perf_profile| namespaces.
//
// To build one or more profiles, first the callstack information is queried
// from the SQL tables, and converted into an in-memory representation by
// |PreprocessLocations|. Then an instance of |GProfileBuilder| is used to
// accumulate samples for that profile, and emit all additional information as a
// serialized proto. Only the entities referenced by that particular
// |GProfileBuilder| instance are emitted.
//
// See protos/third_party/pprof/profile.proto for the meaning of terms like
// function/location/line.
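//
// A rough usage sketch for the public entry point (the TraceProcessor setup
// and the |tp| instance here are hypothetical, not part of this file):
//
//   std::vector<SerializedProfile> profiles;
//   TraceToPprof(tp.get(), &profiles, ConversionMode::kHeapProfile,
//                /*flags=*/0, /*pid=*/0, /*timestamps=*/{});
//   // each SerializedProfile now wraps one serialized
//   // perftools.profiles.Profile proto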

namespace {
using StringId = ::perfetto::trace_processor::StringPool::Id;

// In-memory representation of a Profile.Function.
struct Function {
  StringId name_id = StringId::Null();
  StringId system_name_id = StringId::Null();
  StringId filename_id = StringId::Null();

  Function(StringId n, StringId s, StringId f)
      : name_id(n), system_name_id(s), filename_id(f) {}

  bool operator==(const Function& other) const {
    return std::tie(name_id, system_name_id, filename_id) ==
           std::tie(other.name_id, other.system_name_id, other.filename_id);
  }
};

// In-memory representation of a Profile.Line.
struct Line {
  int64_t function_id = 0;  // LocationTracker's interned Function id
  int64_t line_no = 0;

  Line(int64_t func, int64_t line) : function_id(func), line_no(line) {}

  bool operator==(const Line& other) const {
    return function_id == other.function_id && line_no == other.line_no;
  }
};

// In-memory representation of a Profile.Location.
struct Location {
  int64_t mapping_id = 0;  // sqlite row id
  // Common case: location references a single function.
  int64_t single_function_id = 0;  // interned Function id
  // Alternatively: multiple inlined functions, recovered via offline
  // symbolisation. Leaf-first ordering.
  std::vector<Line> inlined_functions;

  Location(int64_t map, int64_t func, std::vector<Line> inlines)
      : mapping_id(map),
        single_function_id(func),
        inlined_functions(std::move(inlines)) {}

  bool operator==(const Location& other) const {
    return std::tie(mapping_id, single_function_id, inlined_functions) ==
           std::tie(other.mapping_id, other.single_function_id,
                    other.inlined_functions);
  }
};
}  // namespace

template <>
struct std::hash<Function> {
  size_t operator()(const Function& loc) const {
    perfetto::base::Hasher hasher;
    hasher.Update(loc.name_id.raw_id());
    hasher.Update(loc.system_name_id.raw_id());
    hasher.Update(loc.filename_id.raw_id());
    return static_cast<size_t>(hasher.digest());
  }
};

template <>
struct std::hash<Location> {
  size_t operator()(const Location& loc) const {
    perfetto::base::Hasher hasher;
    hasher.Update(loc.mapping_id);
    hasher.Update(loc.single_function_id);
    for (auto line : loc.inlined_functions) {
      hasher.Update(line.function_id);
      hasher.Update(line.line_no);
    }
    return static_cast<size_t>(hasher.digest());
  }
};

namespace perfetto {
namespace trace_to_text {
namespace {

using ::perfetto::trace_processor::Iterator;

// The pprof format expects nonzero ids, so shift the 0-based interned ids by
// one.
uint64_t ToPprofId(int64_t id) {
  PERFETTO_DCHECK(id >= 0);
  return static_cast<uint64_t>(id) + 1;
}

std::string AsCsvString(std::vector<uint64_t> vals) {
  std::string ret;
  for (size_t i = 0; i < vals.size(); i++) {
    if (i != 0) {
      ret += ",";
    }
    ret += std::to_string(vals[i]);
  }
  return ret;
}

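// Looks up a single value in the trace_processor |stats| table. For example
// (illustrative values only), GetStatsEntry(tp, "heapprofd_buffer_corrupted",
// 2) runs roughly:
//   select value from stats
//     where name == 'heapprofd_buffer_corrupted' and idx == 2
// A missing row is reported as 0, since some stats are only present when
// nonzero.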
std::optional<int64_t> GetStatsEntry(
    trace_processor::TraceProcessor* tp,
    const std::string& name,
    std::optional<uint64_t> idx = std::nullopt) {
  std::string query = "select value from stats where name == '" + name + "'";
  if (idx.has_value())
    query += " and idx == " + std::to_string(idx.value());

  auto it = tp->ExecuteQuery(query);
  if (!it.Next()) {
    if (!it.Status().ok()) {
      PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                              it.Status().message().c_str());
      return std::nullopt;
    }
    // some stats are not present unless non-zero
    return std::make_optional(0);
  }
  return std::make_optional(it.Get(0).AsLong());
}

// Interns Locations, Lines, and Functions. Interning is done by the entity's
// contents, and has no relation to the row ids in the SQL tables.
// Contains all data for the trace, so can be reused when emitting multiple
// profiles.
//
// TODO(rsavitski): consider moving mappings into here as well. For now, they're
// still emitted in a single scan during profile building. Mappings should be
// unique-enough already in the SQL tables, with only incremental state clearing
// duplicating entries.
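//
// A small illustration of the interning semantics (identifiers made up for the
// example): two structurally equal entries share an id, so
//   tracker.InternFunction(Function(name, sysname, file));   // returns 0
//   tracker.InternFunction(Function(name, sysname, file));   // returns 0 again
//   tracker.InternFunction(Function(other, sysname, file));  // returns 1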
class LocationTracker {
 public:
  int64_t InternLocation(Location loc) {
    auto it = locations_.find(loc);
    if (it == locations_.end()) {
      bool inserted = false;
      std::tie(it, inserted) = locations_.emplace(
          std::move(loc), static_cast<int64_t>(locations_.size()));
      PERFETTO_DCHECK(inserted);
    }
    return it->second;
  }

  int64_t InternFunction(Function func) {
    auto it = functions_.find(func);
    if (it == functions_.end()) {
      bool inserted = false;
      std::tie(it, inserted) =
          functions_.emplace(func, static_cast<int64_t>(functions_.size()));
      PERFETTO_DCHECK(inserted);
    }
    return it->second;
  }

  bool IsCallsiteProcessed(int64_t callstack_id) const {
    return callsite_to_locations_.find(callstack_id) !=
           callsite_to_locations_.end();
  }

  void MaybeSetCallsiteLocations(int64_t callstack_id,
                                 const std::vector<int64_t>& locs) {
    // nop if already set
    callsite_to_locations_.emplace(callstack_id, locs);
  }

  const std::vector<int64_t>& LocationsForCallstack(
      int64_t callstack_id) const {
    auto it = callsite_to_locations_.find(callstack_id);
    PERFETTO_CHECK(callstack_id >= 0 && it != callsite_to_locations_.end());
    return it->second;
  }

  const std::unordered_map<Location, int64_t>& AllLocations() const {
    return locations_;
  }
  const std::unordered_map<Function, int64_t>& AllFunctions() const {
    return functions_;
  }

 private:
  // Root-first location ids for a given callsite id.
  std::unordered_map<int64_t, std::vector<int64_t>> callsite_to_locations_;
  std::unordered_map<Location, int64_t> locations_;
  std::unordered_map<Function, int64_t> functions_;
};

struct PreprocessedInline {
  // |name_id| is already demangled
  StringId name_id = StringId::Null();
  StringId filename_id = StringId::Null();
  int64_t line_no = 0;

  PreprocessedInline(StringId s, StringId f, int64_t line)
      : name_id(s), filename_id(f), line_no(line) {}
};

std::unordered_map<int64_t, std::vector<PreprocessedInline>>
PreprocessInliningInfo(trace_processor::TraceProcessor* tp,
                       trace_processor::StringPool* interner) {
  std::unordered_map<int64_t, std::vector<PreprocessedInline>> inlines;

  // Most-inlined function (leaf) has the lowest id within a symbol set. Query
  // such that the per-set line vectors are built up leaf-first.
  Iterator it = tp->ExecuteQuery(
      "select symbol_set_id, name, source_file, line_number from "
      "stack_profile_symbol order by symbol_set_id asc, id asc;");
  while (it.Next()) {
    int64_t symbol_set_id = it.Get(0).AsLong();
    auto func_sysname = it.Get(1).is_null() ? "" : it.Get(1).AsString();
    auto filename = it.Get(2).is_null() ? "" : it.Get(2).AsString();
    int64_t line_no = it.Get(3).is_null() ? 0 : it.Get(3).AsLong();

    inlines[symbol_set_id].emplace_back(interner->InternString(func_sysname),
                                        interner->InternString(filename),
                                        line_no);
  }

  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
    return {};
  }
  return inlines;
}

// Extracts and interns the unique frames and locations (as defined by the proto
// format) from the callstack SQL tables.
//
// Approach:
//  * for each callstack (callsite ids of the leaves):
//    * use experimental_annotated_callstack to build the full list of
//      constituent frames
//    * for each frame (root to leaf):
//        * intern the location and function(s)
//        * remember the mapping from callsite_id to the callstack so far (from
//          the root and including the frame being considered)
//
// Optionally mixes in the annotations as a frame name suffix (since there's no
// good way to attach extra info to locations in the proto format). This relies
// on the annotations (produced by experimental_annotated_callstack) to be
// stable for a given callsite (equivalently: dependent only on their parents).
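//
// As a rough illustration (frame and annotation names invented for the
// example), with annotate_frames set, a frame whose demangled name is
// "android.os.Handler.dispatchMessage" and whose annotation is "interp" is
// emitted under the function name
//   "android.os.Handler.dispatchMessage [interp]"
// and, while walking a stack root-to-leaf, the partial location lists stored
// per callsite grow as [L0], [L0, L1], [L0, L1, L2], ...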
LocationTracker PreprocessLocations(trace_processor::TraceProcessor* tp,
                                    trace_processor::StringPool* interner,
                                    bool annotate_frames) {
  LocationTracker tracker;

  // Keyed by symbol_set_id, discarded once this function converts the inlines
  // into Line and Function entries.
  std::unordered_map<int64_t, std::vector<PreprocessedInline>> inlining_info =
      PreprocessInliningInfo(tp, interner);

  // Higher callsite ids most likely correspond to the deepest stacks, so we'll
  // fill more of the overall callsite->location map early by visiting the
  // callsites in decreasing id order, since processing a callstack also fills
  // in the data for all of its parent callsites.
  Iterator cid_it = tp->ExecuteQuery(
      "select id from stack_profile_callsite order by id desc;");
  while (cid_it.Next()) {
    int64_t query_cid = cid_it.Get(0).AsLong();

    // If the leaf has been processed, the rest of the stack is already known.
    if (tracker.IsCallsiteProcessed(query_cid))
      continue;

    std::string annotated_query =
        "select sp.id, sp.annotation, spf.mapping, spf.name, "
        "coalesce(spf.deobfuscated_name, demangle(spf.name), spf.name), "
        "spf.symbol_set_id from "
        "experimental_annotated_callstack(" +
        std::to_string(query_cid) +
        ") sp join stack_profile_frame spf on (sp.frame_id == spf.id) "
        "order by depth asc";
    Iterator c_it = tp->ExecuteQuery(annotated_query);

    std::vector<int64_t> callstack_loc_ids;
    while (c_it.Next()) {
      int64_t cid = c_it.Get(0).AsLong();
      auto annotation = c_it.Get(1).is_null() ? "" : c_it.Get(1).AsString();
      int64_t mapping_id = c_it.Get(2).AsLong();
      auto func_sysname = c_it.Get(3).is_null() ? "" : c_it.Get(3).AsString();
      auto func_name = c_it.Get(4).is_null() ? "" : c_it.Get(4).AsString();
      std::optional<int64_t> symbol_set_id =
          c_it.Get(5).is_null() ? std::nullopt
                                : std::make_optional(c_it.Get(5).AsLong());

      Location loc(mapping_id, /*single_function_id=*/-1, {});

      auto intern_function = [interner, &tracker, annotate_frames](
                                 StringId func_sysname_id,
                                 StringId original_func_name_id,
                                 StringId filename_id,
                                 const std::string& anno) {
        std::string fname = interner->Get(original_func_name_id).ToStdString();
        if (annotate_frames && !anno.empty() && !fname.empty())
          fname = fname + " [" + anno + "]";
        StringId func_name_id = interner->InternString(base::StringView(fname));
        Function func(func_name_id, func_sysname_id, filename_id);
        return tracker.InternFunction(func);
      };

      // Inlining information available
      if (symbol_set_id.has_value()) {
        auto it = inlining_info.find(*symbol_set_id);
        if (it == inlining_info.end()) {
          PERFETTO_DFATAL_OR_ELOG(
              "Failed to find stack_profile_symbol entry for symbol_set_id "
              "%" PRIi64 "",
              *symbol_set_id);
          return {};
        }

        // N inlined functions
        // The symbolised packets currently assume pre-demangled data (as that's
        // the default of llvm-symbolizer), so we don't have a system name for
        // each deinlined frame. Set the human-readable name for both fields. We
        // can change this, but there's no demand for accurate system names in
        // pprofs.
        for (const auto& line : it->second) {
          int64_t func_id = intern_function(line.name_id, line.name_id,
                                            line.filename_id, annotation);

          loc.inlined_functions.emplace_back(func_id, line.line_no);
        }
      } else {
        // Otherwise - single function
        int64_t func_id =
            intern_function(interner->InternString(func_sysname),
                            interner->InternString(func_name),
                            /*filename_id=*/StringId::Null(), annotation);
        loc.single_function_id = func_id;
      }

      int64_t loc_id = tracker.InternLocation(std::move(loc));

      // Update the tracker with the locations so far (for example, at depth 2,
      // we'll have 3 root-most locations in |callstack_loc_ids|).
      callstack_loc_ids.push_back(loc_id);
      tracker.MaybeSetCallsiteLocations(cid, callstack_loc_ids);
    }

    if (!c_it.Status().ok()) {
      PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                              c_it.Status().message().c_str());
      return {};
    }
  }

  if (!cid_it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            cid_it.Status().message().c_str());
    return {};
  }

  return tracker;
}

// Builds the |perftools.profiles.Profile| proto.
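// The intended call sequence (mirroring how the converters below use it) is
// roughly:
//   GProfileBuilder builder(locations, &interner);
//   builder.WriteSampleTypes({{"samples", "count"}});
//   builder.AddSample(values, callstack_id);  // repeated per sample
//   std::string profile_proto = builder.CompleteProfile(tp);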
class GProfileBuilder {
 public:
  GProfileBuilder(const LocationTracker& locations,
                  trace_processor::StringPool* interner)
      : locations_(locations), interner_(interner) {
    // The pprof format requires the first entry in the string table to be the
    // empty string.
    int64_t empty_id = ToStringTableId(StringId::Null());
    PERFETTO_CHECK(empty_id == 0);
  }

  void WriteSampleTypes(
      const std::vector<std::pair<std::string, std::string>>& sample_types) {
    for (const auto& st : sample_types) {
      auto* sample_type = result_->add_sample_type();
      sample_type->set_type(
          ToStringTableId(interner_->InternString(base::StringView(st.first))));
      sample_type->set_unit(ToStringTableId(
          interner_->InternString(base::StringView(st.second))));
    }
  }

  bool AddSample(const protozero::PackedVarInt& values, int64_t callstack_id) {
    const auto& location_ids = locations_.LocationsForCallstack(callstack_id);
    if (location_ids.empty()) {
      PERFETTO_DFATAL_OR_ELOG(
          "Failed to find frames for callstack id %" PRIi64 "", callstack_id);
      return false;
    }

    // LocationTracker stores location lists root-first, but the pprof format
    // requires leaf-first.
    protozero::PackedVarInt packed_locs;
    for (auto it = location_ids.rbegin(); it != location_ids.rend(); ++it)
      packed_locs.Append(ToPprofId(*it));

    auto* gsample = result_->add_sample();
    gsample->set_value(values);
    gsample->set_location_id(packed_locs);

    // Remember the locations s.t. we only serialize the referenced ones.
    seen_locations_.insert(location_ids.cbegin(), location_ids.cend());
    return true;
  }

  std::string CompleteProfile(trace_processor::TraceProcessor* tp,
                              bool write_mappings = true) {
    std::set<int64_t> seen_mappings;
    std::set<int64_t> seen_functions;

    if (!WriteLocations(&seen_mappings, &seen_functions))
      return {};
    if (!WriteFunctions(seen_functions))
      return {};
    if (write_mappings && !WriteMappings(tp, seen_mappings))
      return {};

    WriteStringTable();
    return result_.SerializeAsString();
  }

 private:
  // Serializes the Profile.Location entries referenced by this profile.
  bool WriteLocations(std::set<int64_t>* seen_mappings,
                      std::set<int64_t>* seen_functions) {
    const std::unordered_map<Location, int64_t>& locations =
        locations_.AllLocations();

    size_t written_locations = 0;
    for (const auto& loc_and_id : locations) {
      const auto& loc = loc_and_id.first;
      int64_t id = loc_and_id.second;

      if (seen_locations_.find(id) == seen_locations_.end())
        continue;

      written_locations += 1;
      seen_mappings->emplace(loc.mapping_id);

      auto* glocation = result_->add_location();
      glocation->set_id(ToPprofId(id));
      glocation->set_mapping_id(ToPprofId(loc.mapping_id));

      if (!loc.inlined_functions.empty()) {
        for (const auto& line : loc.inlined_functions) {
          seen_functions->insert(line.function_id);

          auto* gline = glocation->add_line();
          gline->set_function_id(ToPprofId(line.function_id));
          gline->set_line(line.line_no);
        }
      } else {
        seen_functions->insert(loc.single_function_id);

        glocation->add_line()->set_function_id(
            ToPprofId(loc.single_function_id));
      }
    }

    if (written_locations != seen_locations_.size()) {
      PERFETTO_DFATAL_OR_ELOG(
          "Found only %zu/%zu locations during serialization.",
          written_locations, seen_locations_.size());
      return false;
    }
    return true;
  }

  // Serializes the Profile.Function entries referenced by this profile.
  bool WriteFunctions(const std::set<int64_t>& seen_functions) {
    const std::unordered_map<Function, int64_t>& functions =
        locations_.AllFunctions();

    size_t written_functions = 0;
    for (const auto& func_and_id : functions) {
      const auto& func = func_and_id.first;
      int64_t id = func_and_id.second;

      if (seen_functions.find(id) == seen_functions.end())
        continue;

      written_functions += 1;

      auto* gfunction = result_->add_function();
      gfunction->set_id(ToPprofId(id));
      gfunction->set_name(ToStringTableId(func.name_id));
      gfunction->set_system_name(ToStringTableId(func.system_name_id));
      if (!func.filename_id.is_null())
        gfunction->set_filename(ToStringTableId(func.filename_id));
    }

    if (written_functions != seen_functions.size()) {
      PERFETTO_DFATAL_OR_ELOG(
          "Found only %zu/%zu functions during serialization.",
          written_functions, seen_functions.size());
      return false;
    }
    return true;
  }

  // Serializes the Profile.Mapping entries referenced by this profile.
  bool WriteMappings(trace_processor::TraceProcessor* tp,
                     const std::set<int64_t>& seen_mappings) {
    Iterator mapping_it = tp->ExecuteQuery(
        "SELECT id, exact_offset, start, end, name, build_id "
        "FROM stack_profile_mapping;");
    size_t mappings_no = 0;
    while (mapping_it.Next()) {
      int64_t id = mapping_it.Get(0).AsLong();
      if (seen_mappings.find(id) == seen_mappings.end())
        continue;
      ++mappings_no;
      auto interned_filename = ToStringTableId(
          interner_->InternString(mapping_it.Get(4).AsString()));
      auto interned_build_id = ToStringTableId(
          interner_->InternString(mapping_it.Get(5).AsString()));
      auto* gmapping = result_->add_mapping();
      gmapping->set_id(ToPprofId(id));
      gmapping->set_file_offset(
          static_cast<uint64_t>(mapping_it.Get(1).AsLong()));
      gmapping->set_memory_start(
          static_cast<uint64_t>(mapping_it.Get(2).AsLong()));
      gmapping->set_memory_limit(
          static_cast<uint64_t>(mapping_it.Get(3).AsLong()));
      gmapping->set_filename(interned_filename);
      gmapping->set_build_id(interned_build_id);
    }
    if (!mapping_it.Status().ok()) {
      PERFETTO_DFATAL_OR_ELOG("Invalid mapping iterator: %s",
                              mapping_it.Status().message().c_str());
      return false;
    }
    if (mappings_no != seen_mappings.size()) {
      PERFETTO_DFATAL_OR_ELOG("Missing mappings.");
      return false;
    }
    return true;
  }

  void WriteStringTable() {
    for (StringId id : string_table_) {
      trace_processor::NullTermStringView s = interner_->Get(id);
      result_->add_string_table(s.data(), s.size());
    }
  }

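  // Maps a StringPool id to its (sequential) index in this profile's string
  // table, interning it on first use. For instance, the id interned in the
  // constructor (for pprof's required empty first entry) gets index 0, the
  // next distinct string index 1, and so on (values here only illustrate the
  // ordering).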
  int64_t ToStringTableId(StringId interned_id) {
    auto it = interning_remapper_.find(interned_id);
    if (it == interning_remapper_.end()) {
      int64_t table_id = static_cast<int64_t>(string_table_.size());
      string_table_.push_back(interned_id);
      bool inserted = false;
      std::tie(it, inserted) =
          interning_remapper_.emplace(interned_id, table_id);
      PERFETTO_DCHECK(inserted);
    }
    return it->second;
  }

  // Contains all locations, lines, functions (in memory):
  const LocationTracker& locations_;

  // String interner; strings referenced by LocationTracker are already
  // interned. The new internings will come from mappings and sample types.
  trace_processor::StringPool* interner_;

  // The profile format uses the repeated string_table field's index as an
  // implicit id, so these structures remap the interned strings into sequential
  // ids. Only the strings referenced by this GProfileBuilder instance will be
  // added to the table.
  std::unordered_map<StringId, int64_t> interning_remapper_;
  std::vector<StringId> string_table_;

  // Profile proto being serialized.
  protozero::HeapBuffered<third_party::perftools::profiles::pbzero::Profile>
      result_;

  // Set of locations referenced by the added samples.
  std::set<int64_t> seen_locations_;
};

namespace heap_profile {
struct View {
  const char* type;
  const char* unit;
  const char* aggregator;
  const char* filter;
};

const View kMallocViews[] = {
    {"Total malloc count", "count", "sum(count)", "size >= 0"},
    {"Total malloc size", "bytes", "SUM(size)", "size >= 0"},
    {"Unreleased malloc count", "count", "SUM(count)", nullptr},
    {"Unreleased malloc size", "bytes", "SUM(size)", nullptr}};

const View kGenericViews[] = {
    {"Total count", "count", "sum(count)", "size >= 0"},
    {"Total size", "bytes", "SUM(size)", "size >= 0"},
    {"Unreleased count", "count", "SUM(count)", nullptr},
    {"Unreleased size", "bytes", "SUM(size)", nullptr}};

const View kJavaSamplesViews[] = {
    {"Total allocation count", "count", "SUM(count)", nullptr},
    {"Total allocation size", "bytes", "SUM(size)", nullptr}};

static bool VerifyPIDStats(trace_processor::TraceProcessor* tp, uint64_t pid) {
  bool success = true;
  std::optional<int64_t> stat =
      GetStatsEntry(tp, "heapprofd_buffer_corrupted", std::make_optional(pid));
  if (!stat.has_value()) {
    PERFETTO_DFATAL_OR_ELOG("Failed to get heapprofd_buffer_corrupted stat");
  } else if (stat.value() > 0) {
    success = false;
    PERFETTO_ELOG("WARNING: The profile for %" PRIu64
                  " ended early due to a buffer corruption."
                  " THIS IS ALWAYS A BUG IN HEAPPROFD OR"
                  " CLIENT MEMORY CORRUPTION.",
                  pid);
  }
  stat = GetStatsEntry(tp, "heapprofd_buffer_overran", std::make_optional(pid));
  if (!stat.has_value()) {
    PERFETTO_DFATAL_OR_ELOG("Failed to get heapprofd_buffer_overran stat");
  } else if (stat.value() > 0) {
    success = false;
    PERFETTO_ELOG("WARNING: The profile for %" PRIu64
                  " ended early due to a buffer overrun.",
                  pid);
  }

  stat = GetStatsEntry(tp, "heapprofd_rejected_concurrent", pid);
  if (!stat.has_value()) {
    PERFETTO_DFATAL_OR_ELOG("Failed to get heapprofd_rejected_concurrent stat");
  } else if (stat.value() > 0) {
    success = false;
    PERFETTO_ELOG("WARNING: The profile for %" PRIu64
                  " was rejected due to a concurrent profile.",
                  pid);
  }
  return success;
}

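// Builds one query (and iterator) per view. As a sketch, for the
// "Unreleased malloc size" view and hypothetical upid=1, ts=1000, the
// generated query is approximately:
//   SELECT hpa.callsite_id, SUM(size) FROM heap_profile_allocation hpa
//     WHERE hpa.callsite_id >= 0 AND hpa.upid = 1 AND hpa.ts <= 1000
//     AND hpa.heap_name = 'libc.malloc' GROUP BY hpa.callsite_id;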
static std::vector<Iterator> BuildViewIterators(
    trace_processor::TraceProcessor* tp,
    uint64_t upid,
    uint64_t ts,
    const char* heap_name,
    const std::vector<View>& views) {
  std::vector<Iterator> view_its;
  for (const View& v : views) {
    std::string query = "SELECT hpa.callsite_id ";
    query +=
        ", " + std::string(v.aggregator) + " FROM heap_profile_allocation hpa ";
    // TODO(fmayer): Figure out where negative callsite_id comes from.
    query += "WHERE hpa.callsite_id >= 0 ";
    query += "AND hpa.upid = " + std::to_string(upid) + " ";
    query += "AND hpa.ts <= " + std::to_string(ts) + " ";
    query += "AND hpa.heap_name = '" + std::string(heap_name) + "' ";
    if (v.filter)
      query += "AND " + std::string(v.filter) + " ";
    query += "GROUP BY hpa.callsite_id;";
    view_its.emplace_back(tp->ExecuteQuery(query));
  }
  return view_its;
}

static bool WriteAllocations(GProfileBuilder* builder,
                             std::vector<Iterator>* view_its) {
  for (;;) {
    bool all_next = true;
    bool any_next = false;
    for (size_t i = 0; i < view_its->size(); ++i) {
      Iterator& it = (*view_its)[i];
      bool next = it.Next();
      if (!it.Status().ok()) {
        PERFETTO_DFATAL_OR_ELOG("Invalid view iterator: %s",
                                it.Status().message().c_str());
        return false;
      }
      all_next = all_next && next;
      any_next = any_next || next;
    }

    if (!all_next) {
      PERFETTO_CHECK(!any_next);
      break;
    }

    protozero::PackedVarInt sample_values;
    int64_t callstack_id = -1;
    for (size_t i = 0; i < view_its->size(); ++i) {
      if (i == 0) {
        callstack_id = (*view_its)[i].Get(0).AsLong();
      } else if (callstack_id != (*view_its)[i].Get(0).AsLong()) {
        PERFETTO_DFATAL_OR_ELOG("Wrong callstack.");
        return false;
      }
      sample_values.Append((*view_its)[i].Get(1).AsLong());
    }

    if (!builder->AddSample(sample_values, callstack_id))
      return false;
  }
  return true;
}

static bool TraceToHeapPprof(trace_processor::TraceProcessor* tp,
                             std::vector<SerializedProfile>* output,
                             bool annotate_frames,
                             uint64_t target_pid,
                             const std::vector<uint64_t>& target_timestamps) {
  trace_processor::StringPool interner;
  LocationTracker locations =
      PreprocessLocations(tp, &interner, annotate_frames);

  bool any_fail = false;
  Iterator it = tp->ExecuteQuery(
      "select distinct hpa.upid, hpa.ts, p.pid, hpa.heap_name "
      "from heap_profile_allocation hpa, "
      "process p where p.upid = hpa.upid;");
  while (it.Next()) {
    GProfileBuilder builder(locations, &interner);
    uint64_t upid = static_cast<uint64_t>(it.Get(0).AsLong());
    uint64_t ts = static_cast<uint64_t>(it.Get(1).AsLong());
    uint64_t profile_pid = static_cast<uint64_t>(it.Get(2).AsLong());
    const char* heap_name = it.Get(3).AsString();
    if ((target_pid > 0 && profile_pid != target_pid) ||
        (!target_timestamps.empty() &&
         std::find(target_timestamps.begin(), target_timestamps.end(), ts) ==
             target_timestamps.end())) {
      continue;
    }

    if (!VerifyPIDStats(tp, profile_pid))
      any_fail = true;

    std::vector<View> views;
    if (base::StringView(heap_name) == "libc.malloc") {
      views.assign(std::begin(kMallocViews), std::end(kMallocViews));
    } else if (base::StringView(heap_name) == "com.android.art") {
      views.assign(std::begin(kJavaSamplesViews), std::end(kJavaSamplesViews));
    } else {
      views.assign(std::begin(kGenericViews), std::end(kGenericViews));
    }

    std::vector<std::pair<std::string, std::string>> sample_types;
    for (const View& view : views) {
      sample_types.emplace_back(view.type, view.unit);
    }
    builder.WriteSampleTypes(sample_types);

    std::vector<Iterator> view_its =
        BuildViewIterators(tp, upid, ts, heap_name, views);
    std::string profile_proto;
    if (WriteAllocations(&builder, &view_its)) {
      profile_proto = builder.CompleteProfile(tp);
    }
    output->emplace_back(
        SerializedProfile{ProfileType::kHeapProfile, profile_pid,
                          std::move(profile_proto), heap_name});
  }

  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
    return false;
  }
  if (any_fail) {
    PERFETTO_ELOG(
        "One or more of your profiles had an issue. Please consult "
        "https://perfetto.dev/docs/data-sources/"
        "native-heap-profiler#troubleshooting");
  }
  return true;
}
}  // namespace heap_profile

namespace java_heap_profile {
struct View {
  const char* type;
  const char* unit;
  const char* query;
};

constexpr View kJavaAllocationViews[] = {
    {"Total allocation count", "count", "count"},
    {"Total allocation size", "bytes", "size"}};

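// Builds the query against experimental_flamegraph for one heap graph dump.
// As an illustration, for columns "count, size, id, parent_id, name" and
// made-up upid=3, ts=123, the returned string is roughly:
//   SELECT count, size, id, parent_id, name
//     FROM experimental_flamegraph('graph', 123, NULL, 3, NULL, NULL)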
std::string CreateHeapDumpFlameGraphQuery(const std::string& columns,
                                          const uint64_t upid,
                                          const uint64_t ts) {
  std::string query = "SELECT " + columns + " ";
  query += "FROM experimental_flamegraph(";

  const std::vector<std::string> query_params = {
      // The type of the profile from which the flamegraph is being generated.
      // Always 'graph' for Java heap graphs.
      "'graph'",
      // Heapdump timestamp
      std::to_string(ts),
      // Timestamp constraints: not relevant and always null for Java heap
      // graphs.
      "NULL",
      // The upid of the heap graph sample
      std::to_string(upid),
      // The upid group: not relevant and always null for Java heap graphs
      "NULL",
      // A regex for focusing on a particular node in the heapgraph
      "NULL"};

  query += base::Join(query_params, ", ");
  query += ")";

  return query;
}

bool WriteAllocations(
    GProfileBuilder* builder,
    const std::unordered_map<int64_t, std::vector<int64_t>>& view_values) {
  for (const auto& [id, values] : view_values) {
    protozero::PackedVarInt sample_values;
    for (const int64_t value : values) {
      sample_values.Append(value);
    }
    if (!builder->AddSample(sample_values, id)) {
      return false;
    }
  }
  return true;
}

// Extracts and interns the unique locations from the heap dump SQL tables.
//
// It uses the experimental_flamegraph table to get a normalized representation
// of the heap graph as a tree, which always takes the shortest path to the
// root.
//
// Approach:
//  * First, iterate over all heap dump flamegraph rows and build a map of
//    flamegraph item id -> flamegraph item parent_id. Each flamegraph item is
//    converted to a Location whose Function name is the class name (as opposed
//    to an actual call function, since allocation call stacks are not
//    available for Java heap dumps). The view_values are also populated here,
//    to avoid iterating over the data again later.
//  * Then, for each location, walk its parents up to the root, and use the
//    resulting list of locations as a 'callstack' (which is really a list of
//    class names).
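//
// A tiny worked example (ids invented for the illustration): given flamegraph
// rows {id=1, parent=null, name="Object"} and {id=2, parent=1, name="String"},
// the callstack recorded for id 2 is the root-first location list
// [loc("Object"), loc("String")].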
LocationTracker PreprocessLocationsForJavaHeap(
    trace_processor::TraceProcessor* tp,
    trace_processor::StringPool* interner,
    const std::vector<View>& views,
    std::unordered_map<int64_t, std::vector<int64_t>>& view_values_out,
    uint64_t upid,
    uint64_t ts) {
  LocationTracker tracker;

  std::string columns;
  for (const auto& view : views) {
    columns += std::string(view.query) + ", ";
  }

  const auto data_columns_count = static_cast<uint32_t>(views.size());
  columns += "id, parent_id, name";

  const std::string query = CreateHeapDumpFlameGraphQuery(columns, upid, ts);
  Iterator it = tp->ExecuteQuery(query);

  // flamegraph id -> flamegraph parent_id
  std::unordered_map<int64_t, int64_t> parents;
  // flamegraph id -> interned location id
  std::unordered_map<int64_t, int64_t> interned_ids;

  // Create locations
  while (it.Next()) {
    const int64_t id = it.Get(data_columns_count).AsLong();

    const int64_t parent_id = it.Get(data_columns_count + 1).is_null()
                                  ? -1
                                  : it.Get(data_columns_count + 1).AsLong();

    auto name = it.Get(data_columns_count + 2).is_null()
                    ? ""
                    : it.Get(data_columns_count + 2).AsString();

    parents.emplace(id, parent_id);

    StringId func_name_id = interner->InternString(name);
    Function func(func_name_id, StringId::Null(), StringId::Null());
    auto interned_function_id = tracker.InternFunction(func);

    Location loc(/*map=*/0, /*func=*/interned_function_id, /*inlines=*/{});
    auto interned_location_id = tracker.InternLocation(std::move(loc));

    interned_ids.emplace(id, interned_location_id);

    std::vector<int64_t> view_values_vector;
    for (uint32_t i = 0; i < views.size(); ++i) {
      view_values_vector.push_back(it.Get(i).AsLong());
    }

    view_values_out.emplace(id, view_values_vector);
  }

  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
    return {};
  }

  // Iterate over all known locations again and build root-first paths
  // for every location
  for (auto& parent : parents) {
    std::vector<int64_t> path;

    int64_t current_parent_id = parent.first;
    while (current_parent_id != -1) {
      auto id_it = interned_ids.find(current_parent_id);
      PERFETTO_CHECK(id_it != interned_ids.end());

      auto parent_location_id = id_it->second;
      path.push_back(parent_location_id);

      // Find parent of the parent
      auto parent_id_it = parents.find(current_parent_id);
      PERFETTO_CHECK(parent_id_it != parents.end());

      current_parent_id = parent_id_it->second;
    }

    // Reverse to make it a root-first list
    std::reverse(path.begin(), path.end());

    tracker.MaybeSetCallsiteLocations(parent.first, path);
  }

  return tracker;
}

bool TraceToHeapPprof(trace_processor::TraceProcessor* tp,
                      std::vector<SerializedProfile>* output,
                      uint64_t target_pid,
                      const std::vector<uint64_t>& target_timestamps) {
  trace_processor::StringPool interner;

  // Find all heap graphs available in the trace and iterate over them
  Iterator it = tp->ExecuteQuery(
      "select distinct hgo.graph_sample_ts, hgo.upid, p.pid from "
      "heap_graph_object hgo join process p using (upid)");

  while (it.Next()) {
    uint64_t ts = static_cast<uint64_t>(it.Get(0).AsLong());
    uint64_t upid = static_cast<uint64_t>(it.Get(1).AsLong());
    uint64_t profile_pid = static_cast<uint64_t>(it.Get(2).AsLong());

    if ((target_pid > 0 && profile_pid != target_pid) ||
        (!target_timestamps.empty() &&
         std::find(target_timestamps.begin(), target_timestamps.end(), ts) ==
             target_timestamps.end())) {
      continue;
    }

    // flamegraph id -> view values
    std::unordered_map<int64_t, std::vector<int64_t>> view_values;

    std::vector<View> views;
    views.assign(std::begin(kJavaAllocationViews),
                 std::end(kJavaAllocationViews));

    LocationTracker locations = PreprocessLocationsForJavaHeap(
        tp, &interner, views, view_values, upid, ts);

    GProfileBuilder builder(locations, &interner);

    std::vector<std::pair<std::string, std::string>> sample_types;
    for (const auto& view : views) {
      sample_types.emplace_back(view.type, view.unit);
    }
    builder.WriteSampleTypes(sample_types);

    std::string profile_proto;
    if (WriteAllocations(&builder, view_values)) {
      profile_proto = builder.CompleteProfile(tp, /*write_mappings=*/false);
    }

    output->emplace_back(SerializedProfile{ProfileType::kJavaHeapProfile,
                                           profile_pid,
                                           std::move(profile_proto), ""});
  }

  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
    return false;
  }

  return true;
}
}  // namespace java_heap_profile

namespace perf_profile {
struct ProcessInfo {
  uint64_t pid;
  std::vector<uint64_t> utids;
};

// Returns a map of upid -> {pid, utids[]} for sampled processes.
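// For instance (values made up), a trace with samples from two threads of one
// process could yield {42 -> {pid: 1234, utids: [7, 9]}}.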
static std::map<uint64_t, ProcessInfo> GetProcessMap(
    trace_processor::TraceProcessor* tp) {
  Iterator it = tp->ExecuteQuery(
      "select distinct process.upid, process.pid, thread.utid from perf_sample "
      "join thread using (utid) join process using (upid) where callsite_id is "
      "not null order by process.upid asc");
  std::map<uint64_t, ProcessInfo> process_map;
  while (it.Next()) {
    uint64_t upid = static_cast<uint64_t>(it.Get(0).AsLong());
    uint64_t pid = static_cast<uint64_t>(it.Get(1).AsLong());
    uint64_t utid = static_cast<uint64_t>(it.Get(2).AsLong());
    process_map[upid].pid = pid;
    process_map[upid].utids.push_back(utid);
  }
  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
    return {};
  }
  return process_map;
}

static void LogTracePerfEventIssues(trace_processor::TraceProcessor* tp) {
  std::optional<int64_t> stat = GetStatsEntry(tp, "perf_samples_skipped");
  if (!stat.has_value()) {
    PERFETTO_DFATAL_OR_ELOG("Failed to look up perf_samples_skipped stat");
  } else if (stat.value() > 0) {
    PERFETTO_ELOG(
        "Warning: the trace recorded %" PRIi64
        " skipped samples, which otherwise matched the tracing config. This "
        "would cause a process to be completely absent from the trace, but "
        "does *not* imply data loss in any of the output profiles.",
        stat.value());
  }

  stat = GetStatsEntry(tp, "perf_samples_skipped_dataloss");
  if (!stat.has_value()) {
    PERFETTO_DFATAL_OR_ELOG(
        "Failed to look up perf_samples_skipped_dataloss stat");
  } else if (stat.value() > 0) {
    PERFETTO_ELOG("DATA LOSS: the trace recorded %" PRIi64
                  " lost perf samples (within traced_perf). This means that "
                  "the trace is missing information, but it is not known "
                  "which profile that affected.",
                  stat.value());
  }

  // Check if any per-cpu ringbuffers encountered dataloss (as recorded by the
  // kernel).
  Iterator it = tp->ExecuteQuery(
      "select idx, value from stats where name == 'perf_cpu_lost_records' and "
      "value > 0 order by idx asc");
  while (it.Next()) {
    PERFETTO_ELOG(
        "DATA LOSS: during the trace, the per-cpu kernel ring buffer for cpu "
        "%" PRIi64 " recorded %" PRIi64
        " lost samples. This means that the trace is missing information, "
        "but it is not known which profile that affected.",
        static_cast<int64_t>(it.Get(0).AsLong()),
        static_cast<int64_t>(it.Get(1).AsLong()));
  }
  if (!it.Status().ok()) {
    PERFETTO_DFATAL_OR_ELOG("Invalid iterator: %s",
                            it.Status().message().c_str());
  }
}

// TODO(rsavitski): decide whether errors in |AddSample| should result in an
// empty profile (and/or whether they should make the overall conversion
// unsuccessful). Furthermore, clarify the return value's semantics for both
// perf and heap profiles.
static bool TraceToPerfPprof(trace_processor::TraceProcessor* tp,
                             std::vector<SerializedProfile>* output,
                             bool annotate_frames,
                             uint64_t target_pid) {
  trace_processor::StringPool interner;
  LocationTracker locations =
      PreprocessLocations(tp, &interner, annotate_frames);

  LogTracePerfEventIssues(tp);

  // Aggregate samples by upid when building profiles.
  std::map<uint64_t, ProcessInfo> process_map = GetProcessMap(tp);
  for (const auto& p : process_map) {
    const ProcessInfo& process = p.second;

    if (target_pid != 0 && process.pid != target_pid)
      continue;

    GProfileBuilder builder(locations, &interner);
    builder.WriteSampleTypes({{"samples", "count"}});

    std::string query = "select callsite_id from perf_sample where utid in (" +
                        AsCsvString(process.utids) +
                        ") and callsite_id is not null order by ts asc;";

    protozero::PackedVarInt single_count_value;
    single_count_value.Append(1);

    Iterator it = tp->ExecuteQuery(query);
    while (it.Next()) {
      int64_t callsite_id = static_cast<int64_t>(it.Get(0).AsLong());
      builder.AddSample(single_count_value, callsite_id);
    }
    if (!it.Status().ok()) {
      PERFETTO_DFATAL_OR_ELOG("Failed to iterate over samples: %s",
                              it.Status().c_message());
      return false;
    }

    std::string profile_proto = builder.CompleteProfile(tp);
    output->emplace_back(SerializedProfile{
        ProfileType::kPerfProfile, process.pid, std::move(profile_proto), ""});
  }
  return true;
}
}  // namespace perf_profile
}  // namespace

bool TraceToPprof(trace_processor::TraceProcessor* tp,
                  std::vector<SerializedProfile>* output,
                  ConversionMode mode,
                  uint64_t flags,
                  uint64_t pid,
                  const std::vector<uint64_t>& timestamps) {
  bool annotate_frames =
      flags & static_cast<uint64_t>(ConversionFlags::kAnnotateFrames);
  switch (mode) {
    case (ConversionMode::kHeapProfile):
      return heap_profile::TraceToHeapPprof(tp, output, annotate_frames, pid,
                                            timestamps);
    case (ConversionMode::kPerfProfile):
      return perf_profile::TraceToPerfPprof(tp, output, annotate_frames, pid);
    case (ConversionMode::kJavaHeapProfile):
      return java_heap_profile::TraceToHeapPprof(tp, output, pid, timestamps);
  }
  PERFETTO_FATAL("unknown conversion option");  // for gcc
}

}  // namespace trace_to_text
}  // namespace perfetto