/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/traced/probes/ftrace/cpu_reader.h"

#include <dirent.h>
#include <fcntl.h>

#include <algorithm>
#include <optional>
#include <utility>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/metatrace.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "src/kallsyms/kernel_symbol_map.h"
#include "src/kallsyms/lazy_kernel_symbolizer.h"
#include "src/traced/probes/ftrace/ftrace_config_muxer.h"
#include "src/traced/probes/ftrace/ftrace_controller.h"  // FtraceClockSnapshot
#include "src/traced/probes/ftrace/ftrace_data_source.h"
#include "src/traced/probes/ftrace/ftrace_print_filter.h"
#include "src/traced/probes/ftrace/proto_translation_table.h"

#include "protos/perfetto/trace/ftrace/ftrace_event.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_stats.pbzero.h"  // FtraceParseStatus
#include "protos/perfetto/trace/ftrace/generic.pbzero.h"
#include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
#include "protos/perfetto/trace/profiling/profile_common.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"

namespace perfetto {
namespace {

using FtraceParseStatus = protos::pbzero::FtraceParseStatus;
using protos::pbzero::KprobeEvent;

// If the compact_sched buffer accumulates more unique strings, the reader will
// flush it to reset the interning state (and make it cheap again).
// This is not an exact cap, since we check only at tracing page boundaries.
constexpr size_t kCompactSchedInternerThreshold = 64;

// For further documentation of these constants see the kernel source:
//   linux/include/linux/ring_buffer.h
// Some of this is also available to userspace at runtime via:
//   /sys/kernel/tracing/events/header_event
constexpr uint32_t kTypePadding = 29;
constexpr uint32_t kTypeTimeExtend = 30;
constexpr uint32_t kTypeTimeStamp = 31;

struct EventHeader {
  // bottom 5 bits
  uint32_t type_or_length : 5;
  // top 27 bits
  uint32_t time_delta : 27;
};
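
// Illustrative decoding of the header word above (hypothetical value, not
// from a real trace): on little endian, a raw header of 0x0000008c yields
//   type_or_length = 0x8c & 0x1f = 12 -> data record with a 12 * 4 = 48 byte
//                                        payload (see ParsePagePayload below)
//   time_delta     = 0x8c >> 5  = 4   -> 4 time units since the prior event
// type_or_length values 29..31 select the kTypePadding / kTypeTimeExtend /
// kTypeTimeStamp records above, and 0 means the 32-bit payload length is
// stored in the first word of the payload.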

// Reads a string from `start` until the first '\0' byte or until fixed_len
// characters have been read. Appends it to `*out` as field `field_id`.
void ReadIntoString(const uint8_t* start,
                    size_t fixed_len,
                    uint32_t field_id,
                    protozero::Message* out) {
  size_t len = strnlen(reinterpret_cast<const char*>(start), fixed_len);
  out->AppendBytes(field_id, reinterpret_cast<const char*>(start), len);
}

bool ReadDataLoc(const uint8_t* start,
                 const uint8_t* field_start,
                 const uint8_t* end,
                 const Field& field,
                 protozero::Message* message) {
  PERFETTO_DCHECK(field.ftrace_size == 4);
  // See kernel header include/trace/trace_events.h
  uint32_t data = 0;
  const uint8_t* ptr = field_start;
  if (!CpuReader::ReadAndAdvance(&ptr, end, &data)) {
    PERFETTO_DFATAL("couldn't read __data_loc value");
    return false;
  }

  const uint16_t offset = data & 0xffff;
  const uint16_t len = (data >> 16) & 0xffff;
  const uint8_t* const string_start = start + offset;

  if (PERFETTO_UNLIKELY(len == 0))
    return true;
  if (PERFETTO_UNLIKELY(string_start < start || string_start + len > end)) {
    PERFETTO_DFATAL("__data_loc points at invalid location");
    return false;
  }
  ReadIntoString(string_start, len, field.proto_field_id, message);
  return true;
}
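
// Worked example for ReadDataLoc() (hypothetical value): a __data_loc word of
// 0x000a0040 decodes to offset = 0x0040 and len = 0x000a, i.e. a 10-byte
// string (including its trailing '\0') located 64 bytes from the start of the
// record. ReadIntoString() then clamps it to the first NUL byte.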

template <typename T>
T ReadValue(const uint8_t* ptr) {
  T t;
  memcpy(&t, reinterpret_cast<const void*>(ptr), sizeof(T));
  return t;
}

// Reads a signed ftrace value as an int64_t, sign extending if necessary.
int64_t ReadSignedFtraceValue(const uint8_t* ptr,
                              FtraceFieldType ftrace_type) {
  if (ftrace_type == kFtraceInt32) {
    int32_t value;
    memcpy(&value, reinterpret_cast<const void*>(ptr), sizeof(value));
    return int64_t(value);
  }
  if (ftrace_type == kFtraceInt64) {
    int64_t value;
    memcpy(&value, reinterpret_cast<const void*>(ptr), sizeof(value));
    return value;
  }
  PERFETTO_FATAL("unexpected ftrace type");
}
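
// Note on the two helpers above: they copy values out with memcpy() instead
// of dereferencing a reinterpret_cast'ed pointer, e.g.
//   int32_t v;
//   memcpy(&v, ptr, sizeof(v));                  // safe at any alignment
//   v = *reinterpret_cast<const int32_t*>(ptr);  // potentially misaligned
// because event fields sit at arbitrary byte offsets within the page buffer.
// Compilers lower the memcpy to a plain load where that is safe.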

bool SetBlocking(int fd, bool is_blocking) {
  int flags = fcntl(fd, F_GETFL, 0);
  flags = (is_blocking) ? (flags & ~O_NONBLOCK) : (flags | O_NONBLOCK);
  return fcntl(fd, F_SETFL, flags) == 0;
}

void SetParseError(const std::set<FtraceDataSource*>& started_data_sources,
                   size_t cpu,
                   FtraceParseStatus status) {
  PERFETTO_DPLOG("[cpu%zu]: unexpected ftrace read error: %s", cpu,
                 protos::pbzero::FtraceParseStatus_Name(status));
  for (FtraceDataSource* data_source : started_data_sources) {
    data_source->mutable_parse_errors()->insert(status);
  }
}

void WriteAndSetParseError(CpuReader::Bundler* bundler,
                           base::FlatSet<FtraceParseStatus>* stat,
                           uint64_t timestamp,
                           FtraceParseStatus status) {
  PERFETTO_DLOG("Error parsing ftrace page: %s",
                protos::pbzero::FtraceParseStatus_Name(status));
  stat->insert(status);
  auto* proto = bundler->GetOrCreateBundle()->add_error();
  if (timestamp)
    proto->set_timestamp(timestamp);
  proto->set_status(status);
}

}  // namespace

using protos::pbzero::GenericFtraceEvent;

CpuReader::CpuReader(size_t cpu,
                     base::ScopedFile trace_fd,
                     const ProtoTranslationTable* table,
                     LazyKernelSymbolizer* symbolizer,
                     protos::pbzero::FtraceClock ftrace_clock,
                     const FtraceClockSnapshot* ftrace_clock_snapshot)
    : cpu_(cpu),
      table_(table),
      symbolizer_(symbolizer),
      trace_fd_(std::move(trace_fd)),
      ftrace_clock_(ftrace_clock),
      ftrace_clock_snapshot_(ftrace_clock_snapshot) {
  PERFETTO_CHECK(trace_fd_);
  PERFETTO_CHECK(SetBlocking(*trace_fd_, false));
}

CpuReader::~CpuReader() = default;

size_t CpuReader::ReadCycle(
    ParsingBuffers* parsing_bufs,
    size_t max_pages,
    const std::set<FtraceDataSource*>& started_data_sources) {
  PERFETTO_DCHECK(max_pages > 0 && parsing_bufs->ftrace_data_buf_pages() > 0);
  metatrace::ScopedEvent evt(metatrace::TAG_FTRACE,
                             metatrace::FTRACE_CPU_READ_CYCLE);

  // Work in batches to keep cache locality, and limit memory usage.
  size_t total_pages_read = 0;
  for (bool is_first_batch = true;; is_first_batch = false) {
    size_t batch_pages = std::min(parsing_bufs->ftrace_data_buf_pages(),
                                  max_pages - total_pages_read);
    size_t pages_read = ReadAndProcessBatch(
        parsing_bufs->ftrace_data_buf(), batch_pages, is_first_batch,
        parsing_bufs->compact_sched_buf(), started_data_sources);

    PERFETTO_DCHECK(pages_read <= batch_pages);
    total_pages_read += pages_read;

    // Check whether we've caught up to the writer, or are possibly giving up
    // on this attempt due to some error.
    if (pages_read != batch_pages)
      break;
    // Check if we've hit the limit of work for this cycle.
    if (total_pages_read >= max_pages)
      break;
  }
  PERFETTO_METATRACE_COUNTER(TAG_FTRACE, FTRACE_PAGES_DRAINED,
                             total_pages_read);
  return total_pages_read;
}
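
// Worked example of the batching above (hypothetical sizes): with a parsing
// buffer of 32 pages and max_pages = 80, ReadCycle() issues batches of 32, 32
// and 16 pages. A batch that comes back short (pages_read < batch_pages)
// means we either drained the kernel buffer or hit an error, so the loop
// stops early rather than spinning on empty reads.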

// metatrace note: mark the reading phase as FTRACE_CPU_READ_BATCH, but let the
// parsing time be implied (by the difference between the caller's span, and
// this reading span). Makes it easier to estimate the read/parse ratio when
// looking at the trace in the UI.
size_t CpuReader::ReadAndProcessBatch(
    uint8_t* parsing_buf,
    size_t max_pages,
    bool first_batch_in_cycle,
    CompactSchedBuffer* compact_sched_buf,
    const std::set<FtraceDataSource*>& started_data_sources) {
  const uint32_t sys_page_size = base::GetSysPageSize();
  size_t pages_read = 0;
  {
    metatrace::ScopedEvent evt(metatrace::TAG_FTRACE,
                               metatrace::FTRACE_CPU_READ_BATCH);
    for (; pages_read < max_pages;) {
      uint8_t* curr_page = parsing_buf + (pages_read * sys_page_size);
      ssize_t res = PERFETTO_EINTR(read(*trace_fd_, curr_page, sys_page_size));
      if (res < 0) {
        // Expected errors:
        // EAGAIN: no data (since we're in non-blocking mode).
        // ENOMEM, EBUSY: temporary ftrace failures (they happen).
        // ENODEV: the cpu is offline (b/145583318).
        if (errno != EAGAIN && errno != ENOMEM && errno != EBUSY &&
            errno != ENODEV) {
          SetParseError(started_data_sources, cpu_,
                        FtraceParseStatus::FTRACE_STATUS_UNEXPECTED_READ_ERROR);
        }
        break;  // stop reading regardless of errno
      }

      // As long as all of our reads are for a single page, the kernel should
      // return exactly a well-formed raw ftrace page (if not in the steady
      // state of reading out fully-written pages, the kernel will construct
      // pages as necessary, copying over events and zero-filling at the end).
      // A sub-page read() is therefore not expected in practice. Kernel source
      // pointer: see usage of |info->read| within |tracing_buffers_read|.
      if (res == 0) {
        // Very rare, but possible. Stop for now, as this seems to occur when
        // we've caught up to the writer.
        PERFETTO_DLOG("[cpu%zu]: 0-sized read from ftrace pipe.", cpu_);
        break;
      }
      if (res != static_cast<ssize_t>(sys_page_size)) {
        SetParseError(started_data_sources, cpu_,
                      FtraceParseStatus::FTRACE_STATUS_PARTIAL_PAGE_READ);
        break;
      }

      pages_read += 1;

      // Heuristic for detecting whether we've caught up to the writer, based
      // on how much data is in this tracing page. To figure out the amount of
      // ftrace data, we need to parse the page header (since the read always
      // returns a page, zero-filled at the end). If we read fewer bytes than
      // the threshold, it means that we caught up with the write pointer and
      // started consuming ftrace events in real-time. The threshold cannot
      // simply be the full page size because it needs to account for
      // fragmentation, i.e. for the fact that the last trace event didn't fit
      // in the current page and hence the current page was terminated
      // prematurely. This threshold is quite permissive since Android
      // userspace tracing can log >500 byte strings via ftrace/print events.
      // False positives are still possible if events are bigger than half a
      // page, but we don't have a robust way of checking buffer occupancy
      // with nonblocking reads. This can be revisited once all kernels can be
      // assumed to have bug-free poll() or reliable
      // tracefs/per_cpu/cpuX/stats values.
      static const size_t kPageFillThreshold = sys_page_size / 2;
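      // Example with 4 KiB pages (the common case): kPageFillThreshold is
      // 2048, so a page whose header below reports e.g. 800 bytes of payload
      // is taken to mean "caught up to the writer" and ends the batch, while
      // one reporting 4080 bytes keeps the loop reading.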
      const uint8_t* scratch_ptr = curr_page;
      std::optional<PageHeader> hdr =
          ParsePageHeader(&scratch_ptr, table_->page_header_size_len());
      PERFETTO_DCHECK(hdr && hdr->size > 0 && hdr->size <= sys_page_size);
      if (!hdr.has_value()) {
        // The header error will be logged by ProcessPagesForDataSource.
        break;
      }
      // Note that it is normal for the first read after starting a read cycle
      // to be small: it carries the remainder of events from a page that we
      // partially consumed during the last read of the previous cycle (having
      // caught up to the writer).
      if (hdr->size < kPageFillThreshold &&
          !(first_batch_in_cycle && pages_read == 1)) {
        break;
      }
    }
  }  // end of metatrace::FTRACE_CPU_READ_BATCH

  // Parse the pages and write to the trace for all relevant data sources.
  if (pages_read == 0)
    return pages_read;

  for (FtraceDataSource* data_source : started_data_sources) {
    ProcessPagesForDataSource(
        data_source->trace_writer(), data_source->mutable_metadata(), cpu_,
        data_source->parsing_config(), data_source->mutable_parse_errors(),
        data_source->mutable_bundle_end_timestamp(cpu_), parsing_buf,
        pages_read, compact_sched_buf, table_, symbolizer_,
        ftrace_clock_snapshot_, ftrace_clock_);
  }
  return pages_read;
}

void CpuReader::Bundler::StartNewPacket(
    bool lost_events,
    uint64_t previous_bundle_end_timestamp) {
  FinalizeAndRunSymbolizer();
  packet_ = trace_writer_->NewTracePacket();
  bundle_ = packet_->set_ftrace_events();

  bundle_->set_cpu(static_cast<uint32_t>(cpu_));
  if (lost_events) {
    bundle_->set_lost_events(true);
  }

  // note: set-to-zero is valid and expected for the first bundle per cpu
  // (outside of concurrent tracing), with the effective meaning of "all data
  // is valid since the data source was started".
  bundle_->set_previous_bundle_end_timestamp(previous_bundle_end_timestamp);

  if (ftrace_clock_) {
    bundle_->set_ftrace_clock(ftrace_clock_);
    if (ftrace_clock_snapshot_ && ftrace_clock_snapshot_->ftrace_clock_ts) {
      bundle_->set_ftrace_timestamp(ftrace_clock_snapshot_->ftrace_clock_ts);
      bundle_->set_boot_timestamp(ftrace_clock_snapshot_->boot_clock_ts);
    }
  }
}

void CpuReader::Bundler::FinalizeAndRunSymbolizer() {
  if (!packet_) {
    return;
  }

  if (compact_sched_enabled_) {
    compact_sched_buf_->WriteAndReset(bundle_);
  }

  bundle_->Finalize();
  bundle_ = nullptr;
  // Write the kernel symbol index (mangled address) -> name table.
  // |metadata| is shared across all cpus, is distinct per |data_source| (i.e.
  // tracing session) and is cleared after each FtraceController::ReadTick().
  if (symbolizer_) {
    // Symbol indexes are assigned monotonically as |kernel_addrs.size()|,
    // starting from index 1 (no symbol has index 0). Here we remember the
    // size() (which is also == the highest value in |kernel_addrs|) at the
    // beginning and only write newer indexes bigger than that.
    uint32_t max_index_at_start = metadata_->last_kernel_addr_index_written;
    PERFETTO_DCHECK(max_index_at_start <= metadata_->kernel_addrs.size());
    protos::pbzero::InternedData* interned_data = nullptr;
    auto* ksyms_map = symbolizer_->GetOrCreateKernelSymbolMap();
    bool wrote_at_least_one_symbol = false;
    for (const FtraceMetadata::KernelAddr& kaddr : metadata_->kernel_addrs) {
      if (kaddr.index <= max_index_at_start)
        continue;
      std::string sym_name = ksyms_map->Lookup(kaddr.addr);
      if (sym_name.empty()) {
        // Lookup failed. This can genuinely happen on many occasions. E.g.,
        // workqueue_execute_start has two pointers: one is a pointer to a
        // function (which we expect to be symbolized), the other (|work|) is
        // a pointer to a heap struct, which is unsymbolizable, even when
        // using the textual ftrace endpoint.
        continue;
      }

      if (!interned_data) {
        // If this is the very first write, clear the start of the sequence
        // so the trace processor knows that all previous indexes can be
        // discarded and that the mapping is restarting.
        // In most cases this occurs with cpu==0. But if cpu0 is idle, this
        // will happen with the first CPU that has any ftrace data.
        if (max_index_at_start == 0) {
          packet_->set_sequence_flags(
              protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED);
        }
        interned_data = packet_->set_interned_data();
      }
      auto* interned_sym = interned_data->add_kernel_symbols();
      interned_sym->set_iid(kaddr.index);
      interned_sym->set_str(sym_name);
      wrote_at_least_one_symbol = true;
    }

    auto max_it_at_end = static_cast<uint32_t>(metadata_->kernel_addrs.size());

    // Rationale for the if (wrote_at_least_one_symbol) check: in rare cases,
    // all symbols seen in a ProcessPagesForDataSource() call can fail the
    // ksyms_map->Lookup(). If that happens we don't want to bump the
    // last_kernel_addr_index_written watermark, as that would cause the next
    // call to NOT emit the SEQ_INCREMENTAL_STATE_CLEARED.
    if (wrote_at_least_one_symbol) {
      metadata_->last_kernel_addr_index_written = max_it_at_end;
    }
  }
  packet_ = TraceWriter::TracePacketHandle(nullptr);
}
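
// Illustrative shape of the interning output produced above (field names as
// used by the pbzero setters in this file; values hypothetical):
//
//   packet {
//     sequence_flags: SEQ_INCREMENTAL_STATE_CLEARED  // only on restart
//     interned_data {
//       kernel_symbols { iid: 1 str: "__schedule" }
//       kernel_symbols { iid: 2 str: "finish_task_switch" }
//     }
//   }
//
// Subsequent events reference symbols by iid instead of repeating the
// strings, which is what keeps the encoding compact.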

// Error handling: will attempt parsing all pages even if there are errors in
// parsing the binary layout of the data. The error will be recorded in the
// event bundle proto with a timestamp, letting the trace processor decide
// whether to discard or keep the post-error data. Previously, we crashed as
// soon as we encountered such an error.
// static
bool CpuReader::ProcessPagesForDataSource(
    TraceWriter* trace_writer,
    FtraceMetadata* metadata,
    size_t cpu,
    const FtraceDataSourceConfig* ds_config,
    base::FlatSet<protos::pbzero::FtraceParseStatus>* parse_errors,
    uint64_t* bundle_end_timestamp,
    const uint8_t* parsing_buf,
    const size_t pages_read,
    CompactSchedBuffer* compact_sched_buf,
    const ProtoTranslationTable* table,
    LazyKernelSymbolizer* symbolizer,
    const FtraceClockSnapshot* ftrace_clock_snapshot,
    protos::pbzero::FtraceClock ftrace_clock) {
  const uint32_t sys_page_size = base::GetSysPageSize();
  Bundler bundler(trace_writer, metadata,
                  ds_config->symbolize_ksyms ? symbolizer : nullptr, cpu,
                  ftrace_clock_snapshot, ftrace_clock, compact_sched_buf,
                  ds_config->compact_sched.enabled, *bundle_end_timestamp);

  bool success = true;
  size_t pages_parsed = 0;
  bool compact_sched_enabled = ds_config->compact_sched.enabled;
  for (; pages_parsed < pages_read; pages_parsed++) {
    const uint8_t* curr_page = parsing_buf + (pages_parsed * sys_page_size);
    const uint8_t* curr_page_end = curr_page + sys_page_size;
    const uint8_t* parse_pos = curr_page;
    std::optional<PageHeader> page_header =
        ParsePageHeader(&parse_pos, table->page_header_size_len());

    if (!page_header.has_value() || page_header->size == 0 ||
        parse_pos >= curr_page_end ||
        parse_pos + page_header->size > curr_page_end) {
      WriteAndSetParseError(
          &bundler, parse_errors,
          page_header.has_value() ? page_header->timestamp : 0,
          FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_PAGE_HEADER);
      success = false;
      continue;
    }

    // Start a new bundle if either:
    // * The page we're about to read indicates that there was a kernel ring
    //   buffer overrun since our last read from that per-cpu buffer. We have
    //   a single |lost_events| field per bundle, so start a new packet.
    // * The compact_sched buffer is holding more unique interned strings than
    //   a threshold. We need to flush the compact buffer to make the
    //   interning lookups cheap again.
    bool interner_past_threshold =
        compact_sched_enabled &&
        bundler.compact_sched_buf()->interner().interned_comms_size() >
            kCompactSchedInternerThreshold;

    if (page_header->lost_events || interner_past_threshold) {
      // Pass in an updated bundle_end_timestamp since we're starting a new
      // bundle, which needs to reference the last timestamp from the prior
      // one.
      bundler.StartNewPacket(page_header->lost_events, *bundle_end_timestamp);
    }

    FtraceParseStatus status =
        ParsePagePayload(parse_pos, &page_header.value(), table, ds_config,
                         &bundler, metadata, bundle_end_timestamp);

    if (status != FtraceParseStatus::FTRACE_STATUS_OK) {
      WriteAndSetParseError(&bundler, parse_errors, page_header->timestamp,
                            status);
      success = false;
      continue;
    }
  }
  // bundler.FinalizeAndRunSymbolizer() will run as part of the destructor.
  return success;
}

// A page header consists of:
// * timestamp: 8 bytes
// * commit: 8 bytes on 64 bit, 4 bytes on 32 bit kernels
//
// The kernel reports this at /sys/kernel/debug/tracing/events/header_page.
//
// |commit|'s bottom bits represent the length of the payload following this
// header. The top bits have been repurposed as a bitset of flags pertaining to
// data loss. We look only at the "there has been some data lost" flag
// (RB_MISSED_EVENTS), and ignore the relatively tricky "appended the precise
// lost events count past the end of the valid data, as there was room to do so"
// flag (RB_MISSED_STORED).
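//
// Illustrative decode (hypothetical value): on a 64-bit kernel, a |commit|
// word of 0x0000000080000fe0 yields size = 0xfe0 (4064 bytes of payload) and
// lost_events = true, since bit 31 (RB_MISSED_EVENTS) is set. See the mask
// constants inside ParsePageHeader() below.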
//
// static
std::optional<CpuReader::PageHeader> CpuReader::ParsePageHeader(
    const uint8_t** ptr,
    uint16_t page_header_size_len) {
  // Mask for the data length portion of the |commit| field. Note that the
  // kernel implementation never explicitly defines the boundary (beyond using
  // bits 30 and 31 as flags), but 27 bits are mentioned as sufficient in the
  // original commit message, and that is the constant used by trace-cmd.
  constexpr static uint64_t kDataSizeMask = (1ull << 27) - 1;
  // If set, indicates that the relevant cpu has lost events since the last
  // read (the kernel clears the bit internally).
  constexpr static uint64_t kMissedEventsFlag = (1ull << 31);

  const uint8_t* end_of_page = *ptr + base::GetSysPageSize();
  PageHeader page_header;
  if (!CpuReader::ReadAndAdvance<uint64_t>(ptr, end_of_page,
                                           &page_header.timestamp))
    return std::nullopt;

  uint32_t size_and_flags;

  // On little endian, we can just read a uint32_t and skip the rest of the
  // |commit| word later.
  if (!CpuReader::ReadAndAdvance<uint32_t>(
          ptr, end_of_page, base::AssumeLittleEndian(&size_and_flags)))
    return std::nullopt;

  page_header.size = size_and_flags & kDataSizeMask;
  page_header.lost_events = bool(size_and_flags & kMissedEventsFlag);
  PERFETTO_DCHECK(page_header.size <= base::GetSysPageSize());

  // Skip the rest of the |commit| word, if applicable. On 32-bit kernels,
  // page_header_size_len - 4 evaluates to 0 and this is a no-op. On 64-bit
  // kernels, this advances by 4 bytes.
  PERFETTO_DCHECK(page_header_size_len >= 4);
  *ptr += page_header_size_len - 4;

  return std::make_optional(page_header);
}

// A raw ftrace buffer page consists of a header followed by a sequence of
// binary ftrace events. See |ParsePageHeader| for the format of the former.
//
// Error handling: if the binary data disagrees with our understanding of the
// ring buffer layout, returns an error and skips the rest of the page (but
// some events may have already been parsed and serialised).
//
// This method is deliberately static so it can be tested independently.
protos::pbzero::FtraceParseStatus CpuReader::ParsePagePayload(
    const uint8_t* start_of_payload,
    const PageHeader* page_header,
    const ProtoTranslationTable* table,
    const FtraceDataSourceConfig* ds_config,
    Bundler* bundler,
    FtraceMetadata* metadata,
    uint64_t* bundle_end_timestamp) {
  const uint8_t* ptr = start_of_payload;
  const uint8_t* const end = ptr + page_header->size;

  uint64_t timestamp = page_header->timestamp;
  uint64_t last_written_event_ts = 0;

  while (ptr < end) {
    EventHeader event_header;
    if (!ReadAndAdvance(&ptr, end, &event_header))
      return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_EVENT_HEADER;

    timestamp += event_header.time_delta;

    switch (event_header.type_or_length) {
      case kTypePadding: {
        // Left over page padding or discarded event.
        if (event_header.time_delta == 0) {
          // Should never happen: null padding event with unspecified size.
          // Only written beyond page_header->size.
          return FtraceParseStatus::FTRACE_STATUS_ABI_NULL_PADDING;
        }
        uint32_t length = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &length))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_PADDING_LENGTH;
        // Length includes itself (4 bytes).
        if (length < 4)
          return FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_PADDING_LENGTH;
        ptr += length - 4;
        break;
      }
      case kTypeTimeExtend: {
        // Extend the time delta.
        uint32_t time_delta_ext = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &time_delta_ext))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_TIME_EXTEND;
        timestamp += (static_cast<uint64_t>(time_delta_ext)) << 27;
        break;
      }
      case kTypeTimeStamp: {
        // Absolute timestamp. This was historically partially implemented,
        // but not written. Kernels 4.17+ reimplemented this record, changing
        // its size in the process. We assume the newer layout. Parsed the
        // same way as kTypeTimeExtend, except that the timestamp is
        // interpreted as absolute, instead of as a delta on top of the
        // previous state.
        uint32_t time_delta_ext = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &time_delta_ext))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_TIME_STAMP;
        timestamp = event_header.time_delta +
                    (static_cast<uint64_t>(time_delta_ext) << 27);
        break;
      }
      // Data record:
      default: {
        // If type_or_length is 1..28, the record length is 4x that value.
        // If type_or_length == 0, the length of the record is stored in the
        // first uint32_t word of the payload.
        uint32_t event_size = 0;
        if (event_header.type_or_length == 0) {
          if (!ReadAndAdvance<uint32_t>(&ptr, end, &event_size))
            return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_DATA_LENGTH;
          // Size includes itself (4 bytes). However we've seen rare
          // contradictions on select Android 4.19+ kernels: the page header
          // says there's still valid data, but the rest of the page is full
          // of zeroes (which would not decode to a valid event). b/204564312.
          if (event_size == 0)
            return FtraceParseStatus::FTRACE_STATUS_ABI_ZERO_DATA_LENGTH;
          else if (event_size < 4)
            return FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_DATA_LENGTH;
          event_size -= 4;
        } else {
          event_size = 4 * event_header.type_or_length;
        }
        const uint8_t* start = ptr;
        const uint8_t* next = ptr + event_size;

        if (next > end)
          return FtraceParseStatus::FTRACE_STATUS_ABI_END_OVERFLOW;

        uint16_t ftrace_event_id = 0;
        if (!ReadAndAdvance<uint16_t>(&ptr, end, &ftrace_event_id))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_EVENT_ID;

        if (ds_config->event_filter.IsEventEnabled(ftrace_event_id)) {
          // Special-cased handling of some scheduler events when compact
          // format is enabled.
          bool compact_sched_enabled = ds_config->compact_sched.enabled;
          const CompactSchedSwitchFormat& sched_switch_format =
              table->compact_sched_format().sched_switch;
          const CompactSchedWakingFormat& sched_waking_format =
              table->compact_sched_format().sched_waking;

          // Special-cased filtering of ftrace/print events to retain only the
          // matching events.
          bool event_written = true;
          bool ftrace_print_filter_enabled =
              ds_config->print_filter.has_value();

          if (compact_sched_enabled &&
              ftrace_event_id == sched_switch_format.event_id) {
            if (event_size < sched_switch_format.size)
              return FtraceParseStatus::FTRACE_STATUS_SHORT_COMPACT_EVENT;

            ParseSchedSwitchCompact(start, timestamp, &sched_switch_format,
                                    bundler->compact_sched_buf(), metadata);
          } else if (compact_sched_enabled &&
                     ftrace_event_id == sched_waking_format.event_id) {
            if (event_size < sched_waking_format.size)
              return FtraceParseStatus::FTRACE_STATUS_SHORT_COMPACT_EVENT;

            ParseSchedWakingCompact(start, timestamp, &sched_waking_format,
                                    bundler->compact_sched_buf(), metadata);
          } else if (ftrace_print_filter_enabled &&
                     ftrace_event_id == ds_config->print_filter->event_id()) {
            if (ds_config->print_filter->IsEventInteresting(start, next)) {
              protos::pbzero::FtraceEvent* event =
                  bundler->GetOrCreateBundle()->add_event();
              event->set_timestamp(timestamp);
              if (!ParseEvent(ftrace_event_id, start, next, table, ds_config,
                              event, metadata)) {
                return FtraceParseStatus::FTRACE_STATUS_INVALID_EVENT;
              }
            } else {  // print event did NOT pass the filter
              event_written = false;
            }
          } else {
            // Common case: parse all other types of enabled events.
            protos::pbzero::FtraceEvent* event =
                bundler->GetOrCreateBundle()->add_event();
            event->set_timestamp(timestamp);
            if (!ParseEvent(ftrace_event_id, start, next, table, ds_config,
                            event, metadata)) {
              return FtraceParseStatus::FTRACE_STATUS_INVALID_EVENT;
            }
          }
          if (event_written) {
            last_written_event_ts = timestamp;
          }
        }  // IsEventEnabled(id)
        ptr = next;
      }              // case (data_record)
    }                // switch (event_header.type_or_length)
  }                  // while (ptr < end)

  if (last_written_event_ts)
    *bundle_end_timestamp = last_written_event_ts;
  return FtraceParseStatus::FTRACE_STATUS_OK;
}
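
// Worked example of the timestamp bookkeeping above (hypothetical values):
// starting from a page header timestamp of 1'000'000, a kTypeTimeExtend
// record with time_delta = 0 and a payload of 0x2 adds (2 << 27) =
// 268'435'456, and a following data record with time_delta = 100 adds 100,
// leaving |timestamp| at 269'435'556. The 27-bit header delta alone can only
// express ~134 ms worth of nanoseconds, hence the extend records.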

// |start| is the start of the current event.
// |end| is the end of the buffer.
bool CpuReader::ParseEvent(uint16_t ftrace_event_id,
                           const uint8_t* start,
                           const uint8_t* end,
                           const ProtoTranslationTable* table,
                           const FtraceDataSourceConfig* ds_config,
                           protozero::Message* message,
                           FtraceMetadata* metadata) {
  PERFETTO_DCHECK(start < end);

  // The event must be enabled and known to reach here.
  const Event& info = *table->GetEventById(ftrace_event_id);

  if (info.size > static_cast<size_t>(end - start)) {
    PERFETTO_DLOG("Expected event length is beyond end of buffer.");
    return false;
  }

  bool success = true;
  const Field* common_pid_field = table->common_pid();
  if (PERFETTO_LIKELY(common_pid_field))
    success &=
        ParseField(*common_pid_field, start, end, table, message, metadata);

  protozero::Message* nested =
      message->BeginNestedMessage<protozero::Message>(info.proto_field_id);

  // Parse generic (not known at compile time) event.
  if (PERFETTO_UNLIKELY(info.proto_field_id ==
                        protos::pbzero::FtraceEvent::kGenericFieldNumber)) {
    nested->AppendString(GenericFtraceEvent::kEventNameFieldNumber, info.name);
    for (const Field& field : info.fields) {
      auto* generic_field = nested->BeginNestedMessage<protozero::Message>(
          GenericFtraceEvent::kFieldFieldNumber);
      generic_field->AppendString(GenericFtraceEvent::Field::kNameFieldNumber,
                                  field.ftrace_name);
      success &= ParseField(field, start, end, table, generic_field, metadata);
    }
  } else if (PERFETTO_UNLIKELY(
                 info.proto_field_id ==
                 protos::pbzero::FtraceEvent::kSysEnterFieldNumber)) {
    success &= ParseSysEnter(info, start, end, nested, metadata);
  } else if (PERFETTO_UNLIKELY(
                 info.proto_field_id ==
                 protos::pbzero::FtraceEvent::kSysExitFieldNumber)) {
    success &= ParseSysExit(info, start, end, ds_config, nested, metadata);
  } else if (PERFETTO_UNLIKELY(
                 info.proto_field_id ==
                 protos::pbzero::FtraceEvent::kKprobeEventFieldNumber)) {
    KprobeEvent::KprobeType* elem = ds_config->kprobes.Find(ftrace_event_id);
    nested->AppendString(KprobeEvent::kNameFieldNumber, info.name);
    if (elem) {
      nested->AppendVarInt(KprobeEvent::kTypeFieldNumber, *elem);
    }
  } else {  // Parse all other events.
    for (const Field& field : info.fields) {
      success &= ParseField(field, start, end, table, nested, metadata);
    }
  }

  if (PERFETTO_UNLIKELY(info.proto_field_id ==
                        protos::pbzero::FtraceEvent::kTaskRenameFieldNumber)) {
    // For task renames, we want to store that the pid was renamed. We use the
    // common pid to reduce code complexity as in all the cases we care about,
    // the common pid is the same as the renamed pid (the pid inside the
    // event).
    PERFETTO_DCHECK(metadata->last_seen_common_pid);
    metadata->AddRenamePid(metadata->last_seen_common_pid);
  }

  // This finalizes |nested| and |proto_field| automatically.
  message->Finalize();
  metadata->FinishEvent();
  return success;
}

// Caller must guarantee that the field fits in the range, explicitly:
//   start + field.ftrace_offset + field.ftrace_size <= end
// The only exception is fields with strategy = kCStringToString, where the
// total size isn't known up front. In this case ParseField checks that the
// string terminates within bounds and won't read past |end|.
bool CpuReader::ParseField(const Field& field,
                           const uint8_t* start,
                           const uint8_t* end,
                           const ProtoTranslationTable* table,
                           protozero::Message* message,
                           FtraceMetadata* metadata) {
  PERFETTO_DCHECK(start + field.ftrace_offset + field.ftrace_size <= end);
  const uint8_t* field_start = start + field.ftrace_offset;
  uint32_t field_id = field.proto_field_id;

  switch (field.strategy) {
    case kUint8ToUint32:
    case kUint8ToUint64:
      ReadIntoVarInt<uint8_t>(field_start, field_id, message);
      return true;
    case kUint16ToUint32:
    case kUint16ToUint64:
      ReadIntoVarInt<uint16_t>(field_start, field_id, message);
      return true;
    case kUint32ToUint32:
    case kUint32ToUint64:
      ReadIntoVarInt<uint32_t>(field_start, field_id, message);
      return true;
    case kUint64ToUint64:
      ReadIntoVarInt<uint64_t>(field_start, field_id, message);
      return true;
    case kInt8ToInt32:
    case kInt8ToInt64:
      ReadIntoVarInt<int8_t>(field_start, field_id, message);
      return true;
    case kInt16ToInt32:
    case kInt16ToInt64:
      ReadIntoVarInt<int16_t>(field_start, field_id, message);
      return true;
    case kInt32ToInt32:
    case kInt32ToInt64:
      ReadIntoVarInt<int32_t>(field_start, field_id, message);
      return true;
    case kInt64ToInt64:
      ReadIntoVarInt<int64_t>(field_start, field_id, message);
      return true;
    case kFixedCStringToString:
      // TODO(hjd): Kernel-dive to check how size:0 char fields work.
      ReadIntoString(field_start, field.ftrace_size, field_id, message);
      return true;
    case kCStringToString:
      // TODO(hjd): Kernel-dive to check how size:0 char fields work.
      ReadIntoString(field_start, static_cast<size_t>(end - field_start),
                     field_id, message);
      return true;
    case kStringPtrToString: {
      uint64_t n = 0;
      // The ftrace field may be 8 or 4 bytes and we need to copy it into the
      // bottom of n. In the unlikely case where the field is >8 bytes we
      // should avoid making things worse by corrupting the stack but we
      // don't need to handle it correctly.
      size_t size = std::min<size_t>(field.ftrace_size, sizeof(n));
      memcpy(base::AssumeLittleEndian(&n),
             reinterpret_cast<const void*>(field_start), size);
      // Look up the address in the printk format map and write it into the
      // proto.
      base::StringView name = table->LookupTraceString(n);
      message->AppendBytes(field_id, name.begin(), name.size());
      return true;
    }
    case kDataLocToString:
      return ReadDataLoc(start, field_start, end, field, message);
    case kBoolToUint32:
    case kBoolToUint64:
      ReadIntoVarInt<uint8_t>(field_start, field_id, message);
      return true;
    case kInode32ToUint64:
      ReadInode<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kInode64ToUint64:
      ReadInode<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kPid32ToInt32:
    case kPid32ToInt64:
      ReadPid(field_start, field_id, message, metadata);
      return true;
    case kCommonPid32ToInt32:
    case kCommonPid32ToInt64:
      ReadCommonPid(field_start, field_id, message, metadata);
      return true;
    case kDevId32ToUint64:
      ReadDevId<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kDevId64ToUint64:
      ReadDevId<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kFtraceSymAddr32ToUint64:
      ReadSymbolAddr<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kFtraceSymAddr64ToUint64:
      ReadSymbolAddr<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kInvalidTranslationStrategy:
      break;
  }
  // Shouldn't reach this since we only attempt to parse fields that were
  // validated by the proto translation table earlier.
  return false;
}
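
// Example of a translation strategy in action (hypothetical event format): a
// "char comm[16]" ftrace field maps to kFixedCStringToString, so ParseField()
// reads at most 16 bytes from start + ftrace_offset and stops at the first
// '\0'; a "pid_t pid" field maps to kPid32ToInt32, which both emits the value
// and records the pid in |metadata| for later process-table scraping.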

bool CpuReader::ParseSysEnter(const Event& info,
                              const uint8_t* start,
                              const uint8_t* end,
                              protozero::Message* message,
                              FtraceMetadata* /* metadata */) {
  if (info.fields.size() != 2) {
    PERFETTO_DLOG("Unexpected number of fields for sys_enter");
    return false;
  }
  const auto& id_field = info.fields[0];
  const auto& args_field = info.fields[1];
  if (start + id_field.ftrace_size + args_field.ftrace_size > end) {
    return false;
  }
  // field:long id;
  if (id_field.ftrace_type != kFtraceInt32 &&
      id_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_id = ReadSignedFtraceValue(
      start + id_field.ftrace_offset, id_field.ftrace_type);
  message->AppendVarInt(id_field.proto_field_id, syscall_id);
  // field:unsigned long args[6];
  // proto_translation_table will only allow an exactly 6-element array, so we
  // can make the same hard assumption here.
  constexpr uint16_t arg_count = 6;
  size_t element_size = 0;
  if (args_field.ftrace_type == kFtraceUint32) {
    element_size = 4u;
  } else if (args_field.ftrace_type == kFtraceUint64) {
    element_size = 8u;
  } else {
    return false;
  }
  for (uint16_t i = 0; i < arg_count; ++i) {
    const uint8_t* element_ptr =
        start + args_field.ftrace_offset + i * element_size;
    uint64_t arg_value = 0;
    if (element_size == 8) {
      arg_value = ReadValue<uint64_t>(element_ptr);
    } else {
      arg_value = ReadValue<uint32_t>(element_ptr);
    }
    message->AppendVarInt(args_field.proto_field_id, arg_value);
  }
  return true;
}

bool CpuReader::ParseSysExit(const Event& info,
                             const uint8_t* start,
                             const uint8_t* end,
                             const FtraceDataSourceConfig* ds_config,
                             protozero::Message* message,
                             FtraceMetadata* metadata) {
  if (info.fields.size() != 2) {
    PERFETTO_DLOG("Unexpected number of fields for sys_exit");
    return false;
  }
  const auto& id_field = info.fields[0];
  const auto& ret_field = info.fields[1];
  if (start + id_field.ftrace_size + ret_field.ftrace_size > end) {
    return false;
  }
  //    field:long id;
  if (id_field.ftrace_type != kFtraceInt32 &&
      id_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_id = ReadSignedFtraceValue(
      start + id_field.ftrace_offset, id_field.ftrace_type);
  message->AppendVarInt(id_field.proto_field_id, syscall_id);
  //    field:long ret;
  if (ret_field.ftrace_type != kFtraceInt32 &&
      ret_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_ret = ReadSignedFtraceValue(
      start + ret_field.ftrace_offset, ret_field.ftrace_type);
  message->AppendVarInt(ret_field.proto_field_id, syscall_ret);
  // For any syscall that returns a new file descriptor, mark the fd as a
  // potential candidate for scraping, provided the call succeeded and the
  // value is within fd bounds.
  if (ds_config->syscalls_returning_fd.count(syscall_id) && syscall_ret >= 0 &&
      syscall_ret <= std::numeric_limits<int>::max()) {
    const auto pid = metadata->last_seen_common_pid;
    const auto syscall_ret_u = static_cast<uint64_t>(syscall_ret);
    metadata->fds.insert(std::make_pair(pid, syscall_ret_u));
  }
  return true;
}

// Parse a sched_switch event according to pre-validated format, and buffer the
// individual fields in the current compact batch. See the code populating
// |CompactSchedSwitchFormat| for the assumptions made around the format, which
// this code is closely tied to.
// static
void CpuReader::ParseSchedSwitchCompact(const uint8_t* start,
                                        uint64_t timestamp,
                                        const CompactSchedSwitchFormat* format,
                                        CompactSchedBuffer* compact_buf,
                                        FtraceMetadata* metadata) {
  compact_buf->sched_switch().AppendTimestamp(timestamp);

  int32_t next_pid = ReadValue<int32_t>(start + format->next_pid_offset);
  compact_buf->sched_switch().next_pid().Append(next_pid);
  metadata->AddPid(next_pid);

  int32_t next_prio = ReadValue<int32_t>(start + format->next_prio_offset);
  compact_buf->sched_switch().next_prio().Append(next_prio);

  // Varint encoding of int32 and int64 is the same, so treat the value as
  // int64 after reading.
  int64_t prev_state = ReadSignedFtraceValue(start + format->prev_state_offset,
                                             format->prev_state_type);
  compact_buf->sched_switch().prev_state().Append(prev_state);

  // next_comm
  const char* comm_ptr =
      reinterpret_cast<const char*>(start + format->next_comm_offset);
  size_t iid = compact_buf->interner().InternComm(comm_ptr);
  compact_buf->sched_switch().next_comm_index().Append(iid);
}
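
// Sketch of the resulting compact encoding (hypothetical values): instead of
// one nested FtraceEvent per sched_switch, the bundle carries parallel
// per-field columns, roughly
//   timestamps:      [t0, t1, t2, ...]       (varint-friendly encoding)
//   next_pid:        [1203, 0, 874, ...]
//   next_comm_index: [0, 1, 0, ...]          -> intern table of comm strings
// so a comm string repeated across many switches is written once per bundle
// and each event costs only a handful of varints.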

// static
void CpuReader::ParseSchedWakingCompact(const uint8_t* start,
                                        uint64_t timestamp,
                                        const CompactSchedWakingFormat* format,
                                        CompactSchedBuffer* compact_buf,
                                        FtraceMetadata* metadata) {
  compact_buf->sched_waking().AppendTimestamp(timestamp);

  int32_t pid = ReadValue<int32_t>(start + format->pid_offset);
  compact_buf->sched_waking().pid().Append(pid);
  metadata->AddPid(pid);

  int32_t target_cpu = ReadValue<int32_t>(start + format->target_cpu_offset);
  compact_buf->sched_waking().target_cpu().Append(target_cpu);

  int32_t prio = ReadValue<int32_t>(start + format->prio_offset);
  compact_buf->sched_waking().prio().Append(prio);

  // comm
  const char* comm_ptr =
      reinterpret_cast<const char*>(start + format->comm_offset);
  size_t iid = compact_buf->interner().InternComm(comm_ptr);
  compact_buf->sched_waking().comm_index().Append(iid);

  uint32_t common_flags =
      ReadValue<uint8_t>(start + format->common_flags_offset);
  compact_buf->sched_waking().common_flags().Append(common_flags);
}

}  // namespace perfetto