/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/importers/ftrace/ftrace_tokenizer.h"

#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/base/status.h"
#include "perfetto/protozero/field.h"
#include "perfetto/protozero/proto_decoder.h"
#include "perfetto/protozero/proto_utils.h"
#include "perfetto/public/compiler.h"
#include "perfetto/trace_processor/basic_types.h"
#include "perfetto/trace_processor/ref_counted.h"
#include "perfetto/trace_processor/trace_blob_view.h"
#include "src/trace_processor/importers/common/clock_tracker.h"
#include "src/trace_processor/importers/common/machine_tracker.h"
#include "src/trace_processor/importers/common/metadata_tracker.h"
#include "src/trace_processor/importers/common/parser_types.h"
#include "src/trace_processor/importers/proto/packet_sequence_state_generation.h"
#include "src/trace_processor/sorter/trace_sorter.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/types/variadic.h"
#include "src/trace_processor/util/status_macros.h"

#include "protos/perfetto/common/builtin_clock.pbzero.h"
#include "protos/perfetto/trace/ftrace/cpm_trace.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_event.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.h"
#include "protos/perfetto/trace/ftrace/power.pbzero.h"
#include "protos/perfetto/trace/ftrace/thermal_exynos.pbzero.h"

namespace perfetto {
namespace trace_processor {

using protozero::ProtoDecoder;
using protozero::proto_utils::MakeTagVarInt;
using protozero::proto_utils::ParseVarInt;

using protos::pbzero::BuiltinClock;
using protos::pbzero::FtraceClock;
using protos::pbzero::FtraceEventBundle;

namespace {

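// Sequence-scoped clock id used to represent the ftrace "global" clock, the
// fallback used on old kernels where the boot clock is not available for
// tracing. It is remapped to a globally-unique id via
// ClockTracker::SequenceToGlobalClock() and correlated against BOOTTIME in
// HandleFtraceClockSnapshot().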
static constexpr uint32_t kFtraceGlobalClockIdForOldKernels = 64;

// Fast path for parsing the event id of an ftrace event.
// Speculates that, if the timestamp was found, the common pid appears
// immediately after it and the event id immediately after that.
uint64_t TryFastParseFtraceEventId(const uint8_t* start, const uint8_t* end) {
  constexpr auto kPidFieldNumber = protos::pbzero::FtraceEvent::kPidFieldNumber;
  constexpr auto kPidFieldTag = MakeTagVarInt(kPidFieldNumber);
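  // A protobuf tag is (field_number << 3) | wire_type, encoded as a varint.
  // For small field numbers with the varint wire type the tag fits in a single
  // byte, so a one-byte comparison against kPidFieldTag below is sufficient to
  // detect the common pid field.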

  // If there is not enough data left, or the next byte is not the common pid's
  // tag, bail out and let the caller fall back to the slow path. A varint
  // encoding a 32-bit pid takes at most 5 bytes.
  constexpr uint32_t kMaxPidLength = 5;
  if (PERFETTO_UNLIKELY(static_cast<uint32_t>(end - start) <= kMaxPidLength ||
                        start[0] != kPidFieldTag)) {
    return 0;
  }

  // Skip the common pid.
  uint64_t common_pid = 0;
  const uint8_t* common_pid_end = ParseVarInt(start + 1, end, &common_pid);
  if (PERFETTO_UNLIKELY(common_pid_end == start + 1)) {
    return 0;
  }

  // Read the next varint: this should be the event id tag.
  uint64_t event_tag = 0;
  const uint8_t* event_id_end = ParseVarInt(common_pid_end, end, &event_tag);
  if (event_id_end == common_pid_end) {
    return 0;
  }

  constexpr uint8_t kFieldTypeNumBits = 3;
  constexpr uint64_t kFieldTypeMask =
      (1 << kFieldTypeNumBits) - 1;  // 0000 0111

  // The event wire type should be length delimited.
  auto wire_type = static_cast<protozero::proto_utils::ProtoWireType>(
      event_tag & kFieldTypeMask);
  if (wire_type != protozero::proto_utils::ProtoWireType::kLengthDelimited) {
    return 0;
  }
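  // The remaining high bits of the tag encode the field number. Each ftrace
  // event type is a nested message inside FtraceEvent, so this field number is
  // what the tokenizer uses as the event id.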
  return event_tag >> kFieldTypeNumBits;
}

}  // namespace

PERFETTO_ALWAYS_INLINE
base::Status FtraceTokenizer::TokenizeFtraceBundle(
    TraceBlobView bundle,
    RefPtr<PacketSequenceStateGeneration> state,
    uint32_t packet_sequence_id) {
  protos::pbzero::FtraceEventBundle::Decoder decoder(bundle.data(),
                                                     bundle.length());

  if (PERFETTO_UNLIKELY(!decoder.has_cpu())) {
    PERFETTO_ELOG("CPU field not found in FtraceEventBundle");
    context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
    return base::OkStatus();
  }

  uint32_t cpu = decoder.cpu();
  static constexpr uint32_t kMaxCpuCount = 1024;
  if (PERFETTO_UNLIKELY(cpu >= kMaxCpuCount)) {
    return base::ErrStatus(
        "CPU %u is greater than maximum allowed of %u. This is likely because "
        "of trace corruption",
        cpu, kMaxCpuCount);
  }

  if (PERFETTO_UNLIKELY(decoder.lost_events())) {
    // If set, it means that the kernel overwrote an unspecified number of
    // events since our last read from the per-cpu buffer.
    context_->storage->SetIndexedStats(stats::ftrace_cpu_has_data_loss,
                                       static_cast<int>(cpu), 1);
  }

  ClockTracker::ClockId clock_id;
  switch (decoder.ftrace_clock()) {
    case FtraceClock::FTRACE_CLOCK_UNSPECIFIED:
      clock_id = BuiltinClock::BUILTIN_CLOCK_BOOTTIME;
      break;
    case FtraceClock::FTRACE_CLOCK_GLOBAL:
      clock_id = ClockTracker::SequenceToGlobalClock(
          packet_sequence_id, kFtraceGlobalClockIdForOldKernels);
      break;
    case FtraceClock::FTRACE_CLOCK_MONO_RAW:
      clock_id = BuiltinClock::BUILTIN_CLOCK_MONOTONIC_RAW;
      break;
    case FtraceClock::FTRACE_CLOCK_LOCAL:
      return base::ErrStatus("Unable to parse ftrace packets with local clock");
    default:
      return base::ErrStatus(
          "Unable to parse ftrace packets with unknown clock");
  }

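  // Bundles that are not recorded against the boot clock may carry an
  // (ftrace_timestamp, boot_timestamp) pair. Recording it as a snapshot of the
  // sequence-scoped global clock against BOOTTIME gives ClockTracker a
  // correlation point for converting those event timestamps to trace time.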
  if (decoder.has_ftrace_timestamp()) {
    PERFETTO_DCHECK(clock_id != BuiltinClock::BUILTIN_CLOCK_BOOTTIME);
    HandleFtraceClockSnapshot(decoder.ftrace_timestamp(),
                              decoder.boot_timestamp(), packet_sequence_id);
  }

  if (decoder.has_compact_sched()) {
    TokenizeFtraceCompactSched(cpu, clock_id, decoder.compact_sched());
  }

  for (auto it = decoder.event(); it; ++it) {
    TokenizeFtraceEvent(cpu, clock_id, bundle.slice(it->data(), it->size()),
                        state);
  }

  // First bundle on each cpu is special since ftrace is recorded in per-cpu
  // buffers. In traces written by perfetto v44+ we know the timestamp from
  // which this cpu's data stream is valid. This is important for parsing ring
  // buffer traces, as not all per-cpu data streams will be valid from the same
  // timestamp.
  if (cpu >= per_cpu_seen_first_bundle_.size()) {
    per_cpu_seen_first_bundle_.resize(cpu + 1);
  }
  if (!per_cpu_seen_first_bundle_[cpu]) {
    per_cpu_seen_first_bundle_[cpu] = true;

    // If this cpu's timestamp is the new max, update the metadata table entry.
    // previous_bundle_end_timestamp replaces last_read_event_timestamp in
    // perfetto v47+; at most one of the two will be set.
    if (decoder.has_previous_bundle_end_timestamp() ||
        decoder.has_last_read_event_timestamp()) {
      uint64_t raw_ts = decoder.has_previous_bundle_end_timestamp()
                            ? decoder.previous_bundle_end_timestamp()
                            : decoder.last_read_event_timestamp();
      int64_t timestamp = 0;
      ASSIGN_OR_RETURN(timestamp, context_->clock_tracker->ToTraceTime(
                                      clock_id, static_cast<int64_t>(raw_ts)));

      std::optional<SqlValue> curr_latest_timestamp =
          context_->metadata_tracker->GetMetadata(
              metadata::ftrace_latest_data_start_ns);

      if (!curr_latest_timestamp.has_value() ||
          timestamp > curr_latest_timestamp->AsLong()) {
        context_->metadata_tracker->SetMetadata(
            metadata::ftrace_latest_data_start_ns,
            Variadic::Integer(timestamp));
      }
    }
  }
  return base::OkStatus();
}

PERFETTO_ALWAYS_INLINE
void FtraceTokenizer::TokenizeFtraceEvent(
    uint32_t cpu,
    ClockTracker::ClockId clock_id,
    TraceBlobView event,
    RefPtr<PacketSequenceStateGeneration> state) {
  constexpr auto kTimestampFieldNumber =
      protos::pbzero::FtraceEvent::kTimestampFieldNumber;
  constexpr auto kTimestampFieldTag = MakeTagVarInt(kTimestampFieldNumber);

  const uint8_t* data = event.data();
  const size_t length = event.length();

  // Speculate on the following sequence of varints:
  //  - timestamp tag
  //  - timestamp (64 bit)
  //  - common pid tag
  //  - common pid (32 bit)
  //  - event tag
  uint64_t raw_timestamp = 0;
  bool timestamp_found = false;
  uint64_t event_id = 0;
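  // Schematically, the fastpath expects the event to begin with:
  //   [ts tag][ts varint][pid tag][pid varint][event tag][len][payload...]
  // A varint encoding a 64-bit value takes at most 10 bytes, so requiring
  // length > 10 and capping the parse at data + 11 keeps the speculative
  // timestamp read (one tag byte plus the varint) within bounds.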
  if (PERFETTO_LIKELY(length > 10 && data[0] == kTimestampFieldTag)) {
    // Fastpath.
    const uint8_t* ts_end = ParseVarInt(data + 1, data + 11, &raw_timestamp);
    timestamp_found = ts_end != data + 1;
    if (PERFETTO_LIKELY(timestamp_found)) {
      event_id = TryFastParseFtraceEventId(ts_end, data + length);
    }
  }

  // Slowpath for finding the timestamp.
  if (PERFETTO_UNLIKELY(!timestamp_found)) {
    ProtoDecoder decoder(data, length);
    if (auto ts_field = decoder.FindField(kTimestampFieldNumber)) {
      timestamp_found = true;
      raw_timestamp = ts_field.as_uint64();
    }
    if (PERFETTO_UNLIKELY(!timestamp_found)) {
      context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
      return;
    }
  }

  // Slowpath for finding the event id.
  if (PERFETTO_UNLIKELY(event_id == 0)) {
    ProtoDecoder decoder(data, length);
    for (auto f = decoder.ReadField(); f.valid(); f = decoder.ReadField()) {
      // Find the first length-delimited tag as this corresponds to the ftrace
      // event.
      if (f.type() == protozero::proto_utils::ProtoWireType::kLengthDelimited) {
        event_id = f.id();
        break;
      }
    }
    if (PERFETTO_UNLIKELY(event_id == 0)) {
      context_->storage->IncrementStats(stats::ftrace_missing_event_id);
      return;
    }
  }

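  // A few event types carry their own timestamp nested inside the event
  // payload (for gpu_work_period, in the CLOCK_MONOTONIC_RAW domain), so they
  // are handed to dedicated tokenizers instead of being pushed with
  // raw_timestamp below.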
  if (PERFETTO_UNLIKELY(
          event_id == protos::pbzero::FtraceEvent::kGpuWorkPeriodFieldNumber)) {
    TokenizeFtraceGpuWorkPeriod(cpu, std::move(event), std::move(state));
    return;
  } else if (PERFETTO_UNLIKELY(event_id ==
                               protos::pbzero::FtraceEvent::
                                   kThermalExynosAcpmBulkFieldNumber)) {
    TokenizeFtraceThermalExynosAcpmBulk(cpu, std::move(event),
                                        std::move(state));
    return;
  } else if (PERFETTO_UNLIKELY(event_id ==
                               protos::pbzero::FtraceEvent::
                                   kParamSetValueCpmFieldNumber)) {
    TokenizeFtraceParamSetValueCpm(cpu, std::move(event), std::move(state));
    return;
  }

  auto timestamp = context_->clock_tracker->ToTraceTime(
      clock_id, static_cast<int64_t>(raw_timestamp));
  // ClockTracker will increment some error stats if it failed to convert the
  // timestamp so just return.
  if (!timestamp.ok()) {
    DlogWithLimit(timestamp.status());
    return;
  }

  context_->sorter->PushFtraceEvent(cpu, *timestamp, std::move(event),
                                    std::move(state), context_->machine_id());
}

PERFETTO_ALWAYS_INLINE
void FtraceTokenizer::TokenizeFtraceCompactSched(uint32_t cpu,
                                                 ClockTracker::ClockId clock_id,
                                                 protozero::ConstBytes packet) {
  FtraceEventBundle::CompactSched::Decoder compact_sched(packet);

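  // Sketch of the compact encoding consumed below: each field is a packed
  // repeated array with one entry per event, and the arrays are walked in
  // lockstep. For example (values purely illustrative):
  //   intern_table:           ["swapper/0", "kworker/0:1", ...]
  //   switch_timestamp:       [t0, t1 - t0, t2 - t1, ...]  (delta encoded)
  //   switch_next_comm_index: [1, 0, ...]                  (indexes intern_table)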
  // Build the interning table for comm fields.
  std::vector<StringId> string_table;
  string_table.reserve(512);
  for (auto it = compact_sched.intern_table(); it; it++) {
    StringId value = context_->storage->InternString(*it);
    string_table.push_back(value);
  }

  TokenizeFtraceCompactSchedSwitch(cpu, clock_id, compact_sched, string_table);
  TokenizeFtraceCompactSchedWaking(cpu, clock_id, compact_sched, string_table);
}

void FtraceTokenizer::TokenizeFtraceCompactSchedSwitch(
    uint32_t cpu,
    ClockTracker::ClockId clock_id,
    const FtraceEventBundle::CompactSched::Decoder& compact,
    const std::vector<StringId>& string_table) {
  // Accumulator for timestamp deltas.
  int64_t timestamp_acc = 0;

  // The events' fields are stored in a structure-of-arrays style, using packed
  // repeated fields. Walk each repeated field in step to recover individual
  // events.
  bool parse_error = false;
  auto timestamp_it = compact.switch_timestamp(&parse_error);
  auto pstate_it = compact.switch_prev_state(&parse_error);
  auto npid_it = compact.switch_next_pid(&parse_error);
  auto nprio_it = compact.switch_next_prio(&parse_error);
  auto comm_it = compact.switch_next_comm_index(&parse_error);
  for (; timestamp_it && pstate_it && npid_it && nprio_it && comm_it;
       ++timestamp_it, ++pstate_it, ++npid_it, ++nprio_it, ++comm_it) {
    InlineSchedSwitch event{};

    // delta-encoded timestamp
    timestamp_acc += static_cast<int64_t>(*timestamp_it);
    int64_t event_timestamp = timestamp_acc;

    // index into the interned string table
    if (PERFETTO_UNLIKELY(*comm_it >= string_table.size())) {
      parse_error = true;
      break;
    }
    event.next_comm = string_table[*comm_it];

    event.prev_state = *pstate_it;
    event.next_pid = *npid_it;
    event.next_prio = *nprio_it;

    auto timestamp =
        context_->clock_tracker->ToTraceTime(clock_id, event_timestamp);
    if (!timestamp.ok()) {
      DlogWithLimit(timestamp.status());
      return;
    }
    context_->sorter->PushInlineFtraceEvent(cpu, *timestamp, event,
                                            context_->machine_id());
  }

  // Check that all packed buffers were decoded correctly, and fully.
  bool sizes_match =
      !timestamp_it && !pstate_it && !npid_it && !nprio_it && !comm_it;
  if (parse_error || !sizes_match)
    context_->storage->IncrementStats(stats::compact_sched_has_parse_errors);
}

void FtraceTokenizer::TokenizeFtraceCompactSchedWaking(
    uint32_t cpu,
    ClockTracker::ClockId clock_id,
    const FtraceEventBundle::CompactSched::Decoder& compact,
    const std::vector<StringId>& string_table) {
  // Accumulator for timestamp deltas.
  int64_t timestamp_acc = 0;

  // The events' fields are stored in a structure-of-arrays style, using packed
  // repeated fields. Walk each repeated field in step to recover individual
  // events.
  bool parse_error = false;
  auto timestamp_it = compact.waking_timestamp(&parse_error);
  auto pid_it = compact.waking_pid(&parse_error);
  auto tcpu_it = compact.waking_target_cpu(&parse_error);
  auto prio_it = compact.waking_prio(&parse_error);
  auto comm_it = compact.waking_comm_index(&parse_error);
  auto common_flags_it = compact.waking_common_flags(&parse_error);

  for (; timestamp_it && pid_it && tcpu_it && prio_it && comm_it;
       ++timestamp_it, ++pid_it, ++tcpu_it, ++prio_it, ++comm_it) {
    InlineSchedWaking event{};

    // delta-encoded timestamp
    timestamp_acc += static_cast<int64_t>(*timestamp_it);
    int64_t event_timestamp = timestamp_acc;

    // index into the interned string table
    if (PERFETTO_UNLIKELY(*comm_it >= string_table.size())) {
      parse_error = true;
      break;
    }
    event.comm = string_table[*comm_it];

    event.pid = *pid_it;
    event.target_cpu = static_cast<uint16_t>(*tcpu_it);
    event.prio = static_cast<uint16_t>(*prio_it);

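    // waking_common_flags is deliberately excluded from the loop condition and
    // from the sizes_match check below, so it is advanced separately and only
    // when present; presumably this tolerates traces that predate the field.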
    if (common_flags_it) {
      event.common_flags = static_cast<uint16_t>(*common_flags_it);
      common_flags_it++;
    }

    auto timestamp =
        context_->clock_tracker->ToTraceTime(clock_id, event_timestamp);
    if (!timestamp.ok()) {
      DlogWithLimit(timestamp.status());
      return;
    }
    context_->sorter->PushInlineFtraceEvent(cpu, *timestamp, event,
                                            context_->machine_id());
  }

  // Check that all packed buffers were decoded correctly, and fully.
  bool sizes_match =
      !timestamp_it && !pid_it && !tcpu_it && !prio_it && !comm_it;
  if (parse_error || !sizes_match)
    context_->storage->IncrementStats(stats::compact_sched_has_parse_errors);
}

void FtraceTokenizer::HandleFtraceClockSnapshot(int64_t ftrace_ts,
                                                int64_t boot_ts,
                                                uint32_t packet_sequence_id) {
  // If we've already seen a snapshot at this timestamp, don't unnecessarily
  // add another entry to the clock tracker.
  if (latest_ftrace_clock_snapshot_ts_ == ftrace_ts)
    return;
  latest_ftrace_clock_snapshot_ts_ = ftrace_ts;

  ClockTracker::ClockId global_id = ClockTracker::SequenceToGlobalClock(
      packet_sequence_id, kFtraceGlobalClockIdForOldKernels);
  context_->clock_tracker->AddSnapshot(
      {ClockTracker::ClockTimestamp(global_id, ftrace_ts),
       ClockTracker::ClockTimestamp(BuiltinClock::BUILTIN_CLOCK_BOOTTIME,
                                    boot_ts)});
}

void FtraceTokenizer::TokenizeFtraceGpuWorkPeriod(
    uint32_t cpu,
    TraceBlobView event,
    RefPtr<PacketSequenceStateGeneration> state) {
  // Special handling of valid gpu_work_period tracepoint events which contain
  // timestamp values for the GPU time period nested inside the event data.
  auto ts_field = GetFtraceEventField(
      protos::pbzero::FtraceEvent::kGpuWorkPeriodFieldNumber, event);
  if (!ts_field.has_value()) return;

  protos::pbzero::GpuWorkPeriodFtraceEvent::Decoder gpu_work_event(
      ts_field.value().data(), ts_field.value().size());
  if (!gpu_work_event.has_start_time_ns()) {
    context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
    return;
  }
  uint64_t raw_timestamp = gpu_work_event.start_time_ns();

  // Enforce clock type for the event data to be CLOCK_MONOTONIC_RAW
  // as specified, to calculate the timestamp correctly.
  auto timestamp = context_->clock_tracker->ToTraceTime(
      BuiltinClock::BUILTIN_CLOCK_MONOTONIC_RAW,
      static_cast<int64_t>(raw_timestamp));

  // ClockTracker will increment some error stats if it failed to convert the
  // timestamp so just return.
  if (!timestamp.ok()) {
    DlogWithLimit(timestamp.status());
    return;
  }

  context_->sorter->PushFtraceEvent(cpu, *timestamp, std::move(event),
                                    std::move(state), context_->machine_id());
}

void FtraceTokenizer::TokenizeFtraceThermalExynosAcpmBulk(
    uint32_t cpu,
    TraceBlobView event,
    RefPtr<PacketSequenceStateGeneration> state) {
  // Special handling of valid thermal_exynos_acpm_bulk tracepoint events,
  // which contain the right timestamp value nested inside the event data.
  auto ts_field = GetFtraceEventField(
      protos::pbzero::FtraceEvent::kThermalExynosAcpmBulkFieldNumber, event);
  if (!ts_field.has_value()) return;

  protos::pbzero::ThermalExynosAcpmBulkFtraceEvent::Decoder
      thermal_exynos_acpm_bulk_event(ts_field.value().data(),
                                     ts_field.value().size());
  if (!thermal_exynos_acpm_bulk_event.has_timestamp()) {
    context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
    return;
  }
  int64_t timestamp =
      static_cast<int64_t>(thermal_exynos_acpm_bulk_event.timestamp());
  context_->sorter->PushFtraceEvent(cpu, timestamp, std::move(event),
                                    std::move(state), context_->machine_id());
}

void FtraceTokenizer::TokenizeFtraceParamSetValueCpm(
    uint32_t cpu,
    TraceBlobView event,
    RefPtr<PacketSequenceStateGeneration> state) {
  // Special handling of valid param_set_value_cpm tracepoint events, which
  // contain the right timestamp value nested inside the event data.
  auto ts_field = GetFtraceEventField(
      protos::pbzero::FtraceEvent::kParamSetValueCpmFieldNumber, event);
  if (!ts_field.has_value()) return;

  protos::pbzero::ParamSetValueCpmFtraceEvent::Decoder
      param_set_value_cpm_event(ts_field.value().data(),
                                ts_field.value().size());
  if (!param_set_value_cpm_event.has_timestamp()) {
    context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
    return;
  }
  int64_t timestamp =
      static_cast<int64_t>(param_set_value_cpm_event.timestamp());
  context_->sorter->PushFtraceEvent(cpu, timestamp, std::move(event),
                                    std::move(state), context_->machine_id());
}

std::optional<protozero::Field> FtraceTokenizer::GetFtraceEventField(
    uint32_t event_id, const TraceBlobView& event) {
  // Extract ftrace event field by decoding event trace blob.
  const uint8_t* data = event.data();
  const size_t length = event.length();

  ProtoDecoder decoder(data, length);
  auto ts_field = decoder.FindField(event_id);
  if (!ts_field.valid()) {
    context_->storage->IncrementStats(stats::ftrace_bundle_tokenizer_errors);
    return std::nullopt;
  }
  return ts_field;
}

}  // namespace trace_processor
}  // namespace perfetto