/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/service/trace_buffer.h"

#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/client_identity.h"
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/protozero/proto_utils.h"

#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
#if TRACE_BUFFER_VERBOSE_LOGGING()
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
#else
#define TRACE_BUFFER_DLOG(...) void()
#endif

namespace perfetto {

namespace {
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace.

const size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);

// static
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
                                                 OverwritePolicy pol) {
  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
  if (!trace_buffer->Initialize(size_in_bytes))
    return nullptr;
  return trace_buffer;
}

TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;

bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divisor of the page size");
  auto max_size = std::numeric_limits<decltype(ChunkMeta::record_off)>::max();
  PERFETTO_CHECK(size <= static_cast<size_t>(max_size));
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  used_size_ = 0;
  stats_.set_buffer_size(size);
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}

// Note: |src| points to a shmem region that is shared with the producer. Assume
// that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
void TraceBuffer::CopyChunkUntrusted(
    ProducerID producer_id_trusted,
    const ClientIdentity& client_identity_trusted,
    WriterID writer_id,
    ChunkID chunk_id,
    uint16_t num_fragments,
    uint8_t chunk_flags,
    bool chunk_complete,
    const uint8_t* src,
    size_t size) {
  PERFETTO_CHECK(!read_only_);

  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
  TRACE_BUFFER_DLOG("CopyChunk @ %" PRIdPTR ", size=%zu", wptr_ - begin(), record_size);
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    return;
  }

  has_data_ = true;
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      // These flags should only affect the last packet in the chunk. We clear
      // them, so that TraceBuffer is able to look at the remaining packets in
      // this chunk.
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
      chunk_flags &= ~kChunkNeedsPatching;
    }
  }

  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags & ChunkRecord::kFlagsBitMask;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = GetChunkRecordAt(begin() + record_meta->record_off);

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // If this chunk was previously copied with the same number of fragments,
    // there's no need to copy it again. If the previous chunk was complete
    // already, this should always be the case.
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %" PRIdPTR ", size=%zu", wptr - begin(),
                      record_size);

    // Update chunk meta data stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|.
    TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu", wptr - begin(),
                      uintptr_t(wptr - begin()) + record_size, record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s",
                      base::HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position, write a padding
  // record to clear the end of the buffer and wrap back.
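  // E.g., assuming sizeof(ChunkRecord) == 16, with a 4096-byte buffer and
  // |wptr_| at offset 4080: a 64-byte record doesn't fit in the 16 bytes left,
  // so those 16 bytes become a padding record and the write wraps to offset 0.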
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record and
  // the next valid one.
  // Example ((w) == write cursor):
  //
  // Initial state (wptr_ at offset 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want to write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);

  uint32_t chunk_off = GetOffset(GetChunkRecordAt(wptr_));
  auto it_and_inserted =
      index_.emplace(key, ChunkMeta(chunk_off, num_fragments, chunk_complete,
                                    chunk_flags, client_identity_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + record_size, record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", base::HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the number
  // space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id should be updated even though it's smaller (e.g. |chunk_id| =
  // 1 and |last_chunk_id| = kMaxChunkId; chunk_id - last_chunk_id = 2) and the
  // case where the new ID is an out-of-order ID right after an overflow and
  // last_chunk_id shouldn't be updated even though it's larger (e.g. |chunk_id|
  // = kMaxChunkId and |last_chunk_id| = 1; chunk_id - last_chunk_id =
  // kMaxChunkId - 1).
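  //
  // Scaled-down illustration with 8-bit IDs (kMaxChunkID would be 255):
  // |chunk_id| = 1, |last_chunk_id| = 255 -> uint8_t(1 - 255) = 2 < 127, so
  // the wrapped ID is accepted; |chunk_id| = 255, |last_chunk_id| = 1 ->
  // 254 >= 127, so the chunk is counted as committed out of order.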
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}

ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and we
  // might either reach the end of the buffer or a zeroed region of the buffer.
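  // E.g. if |bytes_to_clear| == 40 and two valid, already-read chunks of 16
  // and 32 bytes sit at the write position, both get deleted (48 bytes) and 8
  // is returned: the caller must fill that gap up to the next valid chunk with
  // padding.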
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer; it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we fill the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record (padding
    // records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG(
          "  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%" PRIdPTR " - %" PRIdPTR "] %d",
          key.producer_id, key.writer_id, key.chunk_id,
          next_chunk_ptr - begin(), next_chunk_ptr - begin() + next_chunk.size,
          will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and broke the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}

void TraceBuffer::AddPaddingRecord(size_t size) {
  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
  ChunkRecord record(size);
  record.is_padding = 1;
  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%" PRIdPTR " - %" PRIdPTR "] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + size, size);
  WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
  stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
  // |wptr_| is deliberately not advanced when writing a padding record.
}

bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  PERFETTO_CHECK(!read_only_);
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.

  ChunkRecord* chunk_record = GetChunkRecordAt(begin() + chunk_meta.record_off);
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");

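  // A patch is typically the 4-byte size header of a packet that continues in
  // a later chunk: the producer couldn't know the final size when the chunk
  // was committed and fills it in afterwards via this out-of-band request.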
  for (size_t i = 0; i < patches_size; i++) {
    uint8_t* ptr =
        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
                      "{%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      patches[i].offset_untrusted, patches[i].data[0],
                      patches[i].data[1], patches[i].data[2],
                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
        ptr > chunk_end - Patch::kSize) {
      // Either the IPC was so slow that in the meantime the writer managed to
      // wrap over |chunk_id|, or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }

    memcpy(ptr, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG("Chunk raw (after patch): %s",
                    base::HexDump(chunk_begin, chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_record->flags = chunk_meta.flags & ChunkRecord::kFlagsBitMask;
  }
  return true;
}

void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = false;
#endif
}

TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before must
  // belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_written_. This is where the sequence will start (see
  // notes about wrapping of IDs in the header).
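  // E.g. if this sequence's chunk IDs are {0, 1, kMaxChunkID - 1, kMaxChunkID}
  // and wrapping_id == 1, iteration starts at kMaxChunkID - 1 and (assuming
  // all chunks are complete and contiguous) visits kMaxChunkID, 0 and finally
  // 1, i.e. oldest to newest.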
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}

void TraceBuffer::SequenceIterator::MoveNext() {
  // Stop iterating when we reach the end of the sequence.
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }

  // If the current chunk wasn't completed yet, we shouldn't advance past it as
  // it may be rewritten with additional packets.
  if (!cur->second.is_complete()) {
    cur = seq_end;
    return;
  }

  ChunkID last_chunk_id = cur->first.chunk_id;
  if (++cur == seq_end)
    cur = seq_begin;

  // There may be a missing chunk in the sequence of chunks, in which case the
  // next chunk's ID won't follow the last one's. If so, skip the rest of the
  // sequence. We'll return to it later once the hole is filled.
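  // E.g. if chunk 6 is still missing, after consuming chunk 5 we jump to
  // seq_end rather than to chunk 7; a later read pass will pick up 6 and 7
  // once 6 has been committed.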
  if (last_chunk_id + 1 != cur->first.chunk_id)
    cur = seq_end;
}

bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only to the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, ClientIdentity(), 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are still awaiting out-of-band patching,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const ProducerAndWriterID producer_and_writer_id =
        MkProducerAndWriterID(trusted_producer_id, writer_id);
    const ClientIdentity& client_identity = chunk_meta->client_identity_trusted;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in the case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext() shouldn't
    //    have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching nor
    //    any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might be also the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+
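    //
    // In the diagram above, Packets 1 and 2 in ChunkID 1 are case B, while
    // Packet 3 (the last fragment of ChunkID 1, continuing through ChunkIDs 2
    // and 3) is case C and gets stitched via ReadAhead().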

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even if we didn't really.
        ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result =
            ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, client_identity,
                                  writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk might
        // contain an invalid fragment. In such case we don't want to stall the
        // sequence but just skip the chunk and move on. ReadNextPacketInChunk()
        // marks the chunk as fully read, so we don't attempt to read from it
        // again in a future call to ReadBuffers(). It also already records an
        // abi violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, client_identity,
                                writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // ReadAhead() didn't find a contiguous packet sequence. We'll try
        // again on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to the beginning of the for(;;MoveNext()).
        // That will move to the next sequence because we set the read iterator
        // to its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...)  [iterate over packet fragments for the current chunk].
  }    // for(;;MoveNext()) [iterate over chunks].
}

TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet move to the next
    // sequence and try coming back here on the next ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate all the stitching and move on to the next chunk
        // in the same sequence, if any.
        auto pw_id = MkProducerAndWriterID(it.producer_id(), it.writer_id());
        packet_corruption |=
            ReadNextPacketInChunk(pw_id, &*read_iter_, packet) ==
            ReadPacketResult::kFailedInvalidPacket;
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      // ReadNextPacketInChunk() already records an abi violation for this case.
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...)  [readahead loop]
  return ReadAheadResult::kFailedMoveToNextSequence;
}

TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ProducerAndWriterID producer_and_writer_id,
    ChunkMeta* const chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  const uint8_t* record_begin = begin() + chunk_meta->record_off;
  DcheckIsAlignedAndWithinBounds(record_begin);
  auto* chunk_record = reinterpret_cast<const ChunkRecord*>(record_begin);
  const uint8_t* record_end = record_begin + chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and declared that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
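  // E.g. a 300-byte fragment starts with the two varint bytes {0xAC, 0x02}, so
  // |packet_data| ends up 2 bytes past |packet_begin| and |next_packet| 300
  // bytes after |packet_data|.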
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
    // packet by writing an invalid size in the last fragment's header. We
    // should handle this case without recording an ABI violation (since Android
    // R).
    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    } else {
      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                          1);
    }
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    auto* writer_stats = writer_stats_.Insert(producer_and_writer_id, {}).first;
    writer_stats->used_chunk_hist.Add(chunk_meta->cur_fragment_offset);
  } else {
    // We have at least one more packet to parse. It should be within the chunk.
    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
        chunk_record->size) {
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    }
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}

void TraceBuffer::DiscardWrite() {
  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
  discard_writes_ = true;
  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
  TRACE_BUFFER_DLOG("  discarding write");
}

std::unique_ptr<TraceBuffer> TraceBuffer::CloneReadOnly() const {
  std::unique_ptr<TraceBuffer> buf(new TraceBuffer(CloneCtor(), *this));
  if (!buf->data_.IsValid())
    return nullptr;  // PagedMemory::Allocate() failed. We are out of memory.
  return buf;
}

TraceBuffer::TraceBuffer(CloneCtor, const TraceBuffer& src)
    : overwrite_policy_(src.overwrite_policy_),
      read_only_(true),
      discard_writes_(src.discard_writes_) {
  if (!Initialize(src.data_.size()))
    return;  // TraceBuffer::Clone() will check |data_| and return nullptr.

  // The assignments below must be done after Initialize().

  EnsureCommitted(src.used_size_);
  memcpy(data_.Get(), src.data_.Get(), src.used_size_);
  last_chunk_id_written_ = src.last_chunk_id_written_;

  stats_ = src.stats_;
  stats_.set_bytes_read(0);
  stats_.set_chunks_read(0);
  stats_.set_readaheads_failed(0);
  stats_.set_readaheads_succeeded(0);

  // Copy the index of chunk metadata and reset the read states.
  index_ = ChunkMap(src.index_);
  for (auto& kv : index_) {
    ChunkMeta& chunk_meta = kv.second;
    chunk_meta.num_fragments_read = 0;
    chunk_meta.cur_fragment_offset = 0;
    chunk_meta.set_last_read_packet_skipped(false);
  }
  read_iter_ = SequenceIterator();
}

}  // namespace perfetto