/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16*6dbdd20aSAndroid Build Coastguard Worker
17*6dbdd20aSAndroid Build Coastguard Worker #include "src/tracing/service/trace_buffer.h"
18*6dbdd20aSAndroid Build Coastguard Worker
19*6dbdd20aSAndroid Build Coastguard Worker #include <limits>
20*6dbdd20aSAndroid Build Coastguard Worker
21*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/base/logging.h"
22*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/ext/base/utils.h"
23*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/ext/tracing/core/client_identity.h"
24*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/ext/tracing/core/shared_memory_abi.h"
25*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/ext/tracing/core/trace_packet.h"
26*6dbdd20aSAndroid Build Coastguard Worker #include "perfetto/protozero/proto_utils.h"
27*6dbdd20aSAndroid Build Coastguard Worker
28*6dbdd20aSAndroid Build Coastguard Worker #define TRACE_BUFFER_VERBOSE_LOGGING() 0 // Set to 1 when debugging unittests.
29*6dbdd20aSAndroid Build Coastguard Worker #if TRACE_BUFFER_VERBOSE_LOGGING()
30*6dbdd20aSAndroid Build Coastguard Worker #define TRACE_BUFFER_DLOG PERFETTO_DLOG
31*6dbdd20aSAndroid Build Coastguard Worker #else
32*6dbdd20aSAndroid Build Coastguard Worker #define TRACE_BUFFER_DLOG(...) void()
33*6dbdd20aSAndroid Build Coastguard Worker #endif
34*6dbdd20aSAndroid Build Coastguard Worker
35*6dbdd20aSAndroid Build Coastguard Worker namespace perfetto {
36*6dbdd20aSAndroid Build Coastguard Worker
namespace {
// Short local aliases for the SharedMemoryABI chunk-header flag bits, to keep
// the flag manipulation code in this file terse. Their semantics are defined
// by SharedMemoryABI::ChunkHeader.
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace.
45*6dbdd20aSAndroid Build Coastguard Worker
46*6dbdd20aSAndroid Build Coastguard Worker const size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);
47*6dbdd20aSAndroid Build Coastguard Worker
48*6dbdd20aSAndroid Build Coastguard Worker // static
Create(size_t size_in_bytes,OverwritePolicy pol)49*6dbdd20aSAndroid Build Coastguard Worker std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
50*6dbdd20aSAndroid Build Coastguard Worker OverwritePolicy pol) {
51*6dbdd20aSAndroid Build Coastguard Worker std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
52*6dbdd20aSAndroid Build Coastguard Worker if (!trace_buffer->Initialize(size_in_bytes))
53*6dbdd20aSAndroid Build Coastguard Worker return nullptr;
54*6dbdd20aSAndroid Build Coastguard Worker return trace_buffer;
55*6dbdd20aSAndroid Build Coastguard Worker }
56*6dbdd20aSAndroid Build Coastguard Worker
// Constructor only records the overwrite policy; the actual buffer memory is
// allocated later by Initialize() (invoked by Create()).
TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}
63*6dbdd20aSAndroid Build Coastguard Worker
64*6dbdd20aSAndroid Build Coastguard Worker TraceBuffer::~TraceBuffer() = default;
65*6dbdd20aSAndroid Build Coastguard Worker
// Allocates the backing buffer and resets all writer/reader state. Returns
// false (leaving the TraceBuffer unusable) if the allocation fails.
bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divider of a page size");
  // ChunkMeta::record_off is used to address any chunk in the buffer, so the
  // whole buffer must be addressable by that type.
  auto max_size = std::numeric_limits<decltype(ChunkMeta::record_off)>::max();
  PERFETTO_CHECK(size <= static_cast<size_t>(max_size));
  // kMayFail: allocation failure is reported via IsValid() below rather than
  // crashing. kDontCommit: memory is not committed upfront.
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  used_size_ = 0;
  stats_.set_buffer_size(size);
  // A chunk can never be larger than the whole buffer.
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  // Reset the read iterator to a well-defined "nothing to read" state.
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}
88*6dbdd20aSAndroid Build Coastguard Worker
// Note: |src| points to a shmem region that is shared with the producer. Assume
// that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
//
// Copies one chunk from the producer's shared memory into this buffer,
// wrapping around (and deleting the oldest chunks) when the write cursor
// reaches the end. If the same chunk was copied before (the service can
// scrape incomplete chunks and later receive the final commit), the existing
// copy is overwritten in place after validating that the re-commit is
// consistent with the previous one.
void TraceBuffer::CopyChunkUntrusted(
    ProducerID producer_id_trusted,
    const ClientIdentity& client_identity_trusted,
    WriterID writer_id,
    ChunkID chunk_id,
    uint16_t num_fragments,
    uint8_t chunk_flags,
    bool chunk_complete,
    const uint8_t* src,
    size_t size) {
  PERFETTO_CHECK(!read_only_);

  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid to end
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
  TRACE_BUFFER_DLOG("CopyChunk @ %" PRIdPTR ", size=%zu", wptr_ - begin(),
                    record_size);
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    // An oversized chunk is an ABI violation by the producer: count and drop
    // it. The DCHECK only fires in tests that don't set
    // |suppress_client_dchecks_for_testing_|.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    return;
  }

  has_data_ = true;
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      // These flags should only affect the last packet in the chunk. We clear
      // them, so that TraceBuffer is able to look at the remaining packets in
      // this chunk.
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
      chunk_flags &= ~kChunkNeedsPatching;
    }
  }

  // Build the record header that will be written inline before the payload.
  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags & ChunkRecord::kFlagsBitMask;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = GetChunkRecordAt(begin() + record_meta->record_off);

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // If this chunk was previously copied with the same number of fragments
    // and the number didn't change, there's no need to copy it again. If the
    // previous chunk was complete already, this should always be the case.
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %" PRIdPTR ", size=%zu",
                      wptr - begin(), record_size);

    // Update chunk meta data stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|.
    TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                      wptr - begin(), uintptr_t(wptr - begin()) + record_size,
                      record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s",
                      base::HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  // Not a re-commit: this is a brand new chunk appended at |wptr_|.
  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position. Write a padding
  // record to clear the end of the buffer and wrap back.
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record.
  // Example ((w) == write cursor):
  //
  // Initial state (wtpr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want now write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);

  uint32_t chunk_off = GetOffset(GetChunkRecordAt(wptr_));
  auto it_and_inserted =
      index_.emplace(key, ChunkMeta(chunk_off, num_fragments, chunk_complete,
                                    chunk_flags, client_identity_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                    wptr_ - begin(), uintptr_t(wptr_ - begin()) + record_size,
                    record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", base::HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the number
  // space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id be updated even though it's smaller (e.g. |chunk_id| = 1 and
  // |last_chunk_id| = kMaxChunkId; chunk_id - last_chunk_id = 0) and the case
  // where the new ID is an out-of-order ID right after an overflow and
  // last_chunk_id shouldn't be updated even though it's larger (e.g. |chunk_id|
  // = kMaxChunkId and |last_chunk_id| = 1; chunk_id - last_chunk_id =
  // kMaxChunkId - 1).
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}
309*6dbdd20aSAndroid Build Coastguard Worker
// Walks the ChunkRecord chain starting at |wptr_| until at least
// |bytes_to_clear| bytes have been covered, removing every scanned
// (non-padding) chunk from the index. Returns the number of leftover bytes
// between |wptr_ + bytes_to_clear| and the end of the last scanned chunk
// (the caller fills that gap with a padding record), or -1 if an unread chunk
// would be overwritten while |overwrite_policy_| == kDiscard.
ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes|). Note that such a chunk might not exist and we might
  // either reach the end of the buffer or a zeroed region of the buffer.
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  // Stats are accumulated locally and committed only at the end, after it is
  // known that the deletion will not bail out with -1.
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer, it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we fill the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record (padding
    // records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        // A chunk with unread fragments is data loss: either refuse (kDiscard
        // policy) or account for it in the overwrite stats.
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG("  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%" PRIdPTR
                        " - %" PRIdPTR "] %d",
                        key.producer_id, key.writer_id, key.chunk_id,
                        next_chunk_ptr - begin(),
                        next_chunk_ptr - begin() + next_chunk.size,
                        will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and breaking the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}
390*6dbdd20aSAndroid Build Coastguard Worker
AddPaddingRecord(size_t size)391*6dbdd20aSAndroid Build Coastguard Worker void TraceBuffer::AddPaddingRecord(size_t size) {
392*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
393*6dbdd20aSAndroid Build Coastguard Worker ChunkRecord record(size);
394*6dbdd20aSAndroid Build Coastguard Worker record.is_padding = 1;
395*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG("AddPaddingRecord @ [%" PRIdPTR " - %" PRIdPTR "] %zu", wptr_ - begin(),
396*6dbdd20aSAndroid Build Coastguard Worker uintptr_t(wptr_ - begin()) + size, size);
397*6dbdd20aSAndroid Build Coastguard Worker WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
398*6dbdd20aSAndroid Build Coastguard Worker stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
399*6dbdd20aSAndroid Build Coastguard Worker // |wptr_| is deliberately not advanced when writing a padding record.
400*6dbdd20aSAndroid Build Coastguard Worker }
401*6dbdd20aSAndroid Build Coastguard Worker
// Applies out-of-band |patches| to a chunk still resident in the buffer.
// Producers leave placeholder bytes (e.g. zeroed packet-size headers) in
// chunks whose packets continue elsewhere, and later send the real bytes via
// IPC; this backfills them.
// Returns false (and bumps the patches_failed stat) when the chunk has
// already been overwritten/evicted from the index, or when any patch offset
// falls outside the chunk payload. |offset_untrusted| comes from the
// producer and must not be trusted.
// When |other_patches_pending| is false this was the last batch of patches,
// so the kChunkNeedsPatching flag is cleared, making the chunk readable.
bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  PERFETTO_CHECK(!read_only_);
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    // The chunk has been overwritten in the meantime. Not necessarily a bug:
    // it can happen legitimately on a busy ring buffer.
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.

  ChunkRecord* chunk_record = GetChunkRecordAt(begin() + chunk_meta.record_off);
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");

  for (size_t i = 0; i < patches_size; i++) {
    // Target address = start of the chunk payload (past the ChunkRecord
    // header) + the producer-supplied offset. Validated right below.
    uint8_t* ptr =
        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
                      "{%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      patches[i].offset_untrusted, patches[i].data[0],
                      patches[i].data[1], patches[i].data[2],
                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
        ptr > chunk_end - Patch::kSize) {
      // Either the IPC was so slow and in the meantime the writer managed to
      // wrap over |chunk_id| or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }

    // Apply the patch: overwrite kSize bytes in place.
    memcpy(ptr, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG("Chunk raw (after patch): %s",
                    base::HexDump(chunk_begin, chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    // Clear the needs-patching flag in both the index entry and the in-buffer
    // ChunkRecord, so readers will now consider this chunk.
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_record->flags = chunk_meta.flags & ChunkRecord::kFlagsBitMask;
  }
  return true;
}
460*6dbdd20aSAndroid Build Coastguard Worker
// (Re)positions the read cursor at the first {ProducerID, WriterID} sequence
// in the index. Call before a run of ReadNextTracePacket() calls.
void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  // Reset the write-interleaving detector; ReadNextTracePacket() DCHECKs that
  // no writes happened since the last BeginRead().
  changed_since_last_read_ = false;
#endif
}
467*6dbdd20aSAndroid Build Coastguard Worker
GetReadIterForSequence(ChunkMap::iterator seq_begin)468*6dbdd20aSAndroid Build Coastguard Worker TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
469*6dbdd20aSAndroid Build Coastguard Worker ChunkMap::iterator seq_begin) {
470*6dbdd20aSAndroid Build Coastguard Worker SequenceIterator iter;
471*6dbdd20aSAndroid Build Coastguard Worker iter.seq_begin = seq_begin;
472*6dbdd20aSAndroid Build Coastguard Worker if (seq_begin == index_.end()) {
473*6dbdd20aSAndroid Build Coastguard Worker iter.cur = iter.seq_end = index_.end();
474*6dbdd20aSAndroid Build Coastguard Worker return iter;
475*6dbdd20aSAndroid Build Coastguard Worker }
476*6dbdd20aSAndroid Build Coastguard Worker
477*6dbdd20aSAndroid Build Coastguard Worker #if PERFETTO_DCHECK_IS_ON()
478*6dbdd20aSAndroid Build Coastguard Worker // Either |seq_begin| is == index_.begin() or the item immediately before must
479*6dbdd20aSAndroid Build Coastguard Worker // belong to a different {ProducerID, WriterID} sequence.
480*6dbdd20aSAndroid Build Coastguard Worker if (seq_begin != index_.begin() && seq_begin != index_.end()) {
481*6dbdd20aSAndroid Build Coastguard Worker auto prev_it = seq_begin;
482*6dbdd20aSAndroid Build Coastguard Worker prev_it--;
483*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(
484*6dbdd20aSAndroid Build Coastguard Worker seq_begin == index_.begin() ||
485*6dbdd20aSAndroid Build Coastguard Worker std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
486*6dbdd20aSAndroid Build Coastguard Worker std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
487*6dbdd20aSAndroid Build Coastguard Worker }
488*6dbdd20aSAndroid Build Coastguard Worker #endif
489*6dbdd20aSAndroid Build Coastguard Worker
490*6dbdd20aSAndroid Build Coastguard Worker // Find the first entry that has a greater {ProducerID, WriterID} (or just
491*6dbdd20aSAndroid Build Coastguard Worker // index_.end() if we reached the end).
492*6dbdd20aSAndroid Build Coastguard Worker ChunkMeta::Key key = seq_begin->first; // Deliberate copy.
493*6dbdd20aSAndroid Build Coastguard Worker key.chunk_id = kMaxChunkID;
494*6dbdd20aSAndroid Build Coastguard Worker iter.seq_end = index_.upper_bound(key);
495*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);
496*6dbdd20aSAndroid Build Coastguard Worker
497*6dbdd20aSAndroid Build Coastguard Worker // Now find the first entry between [seq_begin, seq_end) that is
498*6dbdd20aSAndroid Build Coastguard Worker // > last_chunk_id_written_. This is where we the sequence will start (see
499*6dbdd20aSAndroid Build Coastguard Worker // notes about wrapping of IDs in the header).
500*6dbdd20aSAndroid Build Coastguard Worker auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
501*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
502*6dbdd20aSAndroid Build Coastguard Worker iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
503*6dbdd20aSAndroid Build Coastguard Worker key.chunk_id = iter.wrapping_id;
504*6dbdd20aSAndroid Build Coastguard Worker iter.cur = index_.upper_bound(key);
505*6dbdd20aSAndroid Build Coastguard Worker if (iter.cur == iter.seq_end)
506*6dbdd20aSAndroid Build Coastguard Worker iter.cur = iter.seq_begin;
507*6dbdd20aSAndroid Build Coastguard Worker return iter;
508*6dbdd20aSAndroid Build Coastguard Worker }
509*6dbdd20aSAndroid Build Coastguard Worker
MoveNext()510*6dbdd20aSAndroid Build Coastguard Worker void TraceBuffer::SequenceIterator::MoveNext() {
511*6dbdd20aSAndroid Build Coastguard Worker // Stop iterating when we reach the end of the sequence.
512*6dbdd20aSAndroid Build Coastguard Worker // Note: |seq_begin| might be == |seq_end|.
513*6dbdd20aSAndroid Build Coastguard Worker if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
514*6dbdd20aSAndroid Build Coastguard Worker cur = seq_end;
515*6dbdd20aSAndroid Build Coastguard Worker return;
516*6dbdd20aSAndroid Build Coastguard Worker }
517*6dbdd20aSAndroid Build Coastguard Worker
518*6dbdd20aSAndroid Build Coastguard Worker // If the current chunk wasn't completed yet, we shouldn't advance past it as
519*6dbdd20aSAndroid Build Coastguard Worker // it may be rewritten with additional packets.
520*6dbdd20aSAndroid Build Coastguard Worker if (!cur->second.is_complete()) {
521*6dbdd20aSAndroid Build Coastguard Worker cur = seq_end;
522*6dbdd20aSAndroid Build Coastguard Worker return;
523*6dbdd20aSAndroid Build Coastguard Worker }
524*6dbdd20aSAndroid Build Coastguard Worker
525*6dbdd20aSAndroid Build Coastguard Worker ChunkID last_chunk_id = cur->first.chunk_id;
526*6dbdd20aSAndroid Build Coastguard Worker if (++cur == seq_end)
527*6dbdd20aSAndroid Build Coastguard Worker cur = seq_begin;
528*6dbdd20aSAndroid Build Coastguard Worker
529*6dbdd20aSAndroid Build Coastguard Worker // There may be a missing chunk in the sequence of chunks, in which case the
530*6dbdd20aSAndroid Build Coastguard Worker // next chunk's ID won't follow the last one's. If so, skip the rest of the
531*6dbdd20aSAndroid Build Coastguard Worker // sequence. We'll return to it later once the hole is filled.
532*6dbdd20aSAndroid Build Coastguard Worker if (last_chunk_id + 1 != cur->first.chunk_id)
533*6dbdd20aSAndroid Build Coastguard Worker cur = seq_end;
534*6dbdd20aSAndroid Build Coastguard Worker }
535*6dbdd20aSAndroid Build Coastguard Worker
// Reads the next complete, fully-patched packet out of the buffer.
// On success returns true and fills:
// - |packet|: the packet slices (possibly stitched across several chunks);
// - |sequence_properties|: the trusted producer/writer identity of the
//   sequence the packet came from;
// - |previous_packet_on_sequence_dropped|: whether the packet preceding this
//   one on the same sequence was skipped/dropped.
// Returns false when no further packet is readable. Iterates sequences via
// |read_iter_| (set up by BeginRead()), moving to the next sequence whenever
// the current one is blocked (unpatched chunk, missing chunk, failed
// readahead).
bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only within the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, ClientIdentity(), 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are awaiting to be patched out-of-band,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    // Identity of the sequence the chunk belongs to. "trusted" because it
    // comes from the service-side index, not from producer-controlled data.
    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const ProducerAndWriterID producer_and_writer_id =
        MkProducerAndWriterID(trusted_producer_id, writer_id);
    const ClientIdentity& client_identity = chunk_meta->client_identity_trusted;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in the case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext() shouldn't
    //    have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence doesn't require neither stitching
    //    nor any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might be also the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      // Classify the next unread fragment into one of the cases A/B/C above.
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even if we didn't really.
        ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result =
            ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, client_identity,
                                  writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk might
        // contain an invalid fragment. In such case we don't want to stall the
        // sequence but just skip the chunk and move on. ReadNextPacketInChunk()
        // marks the chunk as fully read, so we don't attempt to read from it
        // again in a future call to ReadBuffers(). It also already records an
        // abi violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        // The fragmented packet was stitched together across chunks.
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, client_identity,
                                writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // readahead didn't find a contiguous packet sequence. We'll try again
        // on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to beginning of the for(;;MoveNext()). That
        // will move to the next sequence because we set the read iterator to
        // its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...)  [iterate over packet fragments for the current chunk].
  }    // for(;;MoveNext()) [iterate over chunks].
}
723*6dbdd20aSAndroid Build Coastguard Worker
ReadAhead(TracePacket * packet)724*6dbdd20aSAndroid Build Coastguard Worker TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
725*6dbdd20aSAndroid Build Coastguard Worker static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
726*6dbdd20aSAndroid Build Coastguard Worker "relying on kMaxChunkID to wrap naturally");
727*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
728*6dbdd20aSAndroid Build Coastguard Worker ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
729*6dbdd20aSAndroid Build Coastguard Worker SequenceIterator it = read_iter_;
730*6dbdd20aSAndroid Build Coastguard Worker for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
731*6dbdd20aSAndroid Build Coastguard Worker // We should stay within the same sequence while iterating here.
732*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
733*6dbdd20aSAndroid Build Coastguard Worker it.writer_id() == read_iter_.writer_id());
734*6dbdd20aSAndroid Build Coastguard Worker
735*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG(" expected chunk ID: %u, actual ID: %u", next_chunk_id,
736*6dbdd20aSAndroid Build Coastguard Worker it.chunk_id());
737*6dbdd20aSAndroid Build Coastguard Worker
738*6dbdd20aSAndroid Build Coastguard Worker if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
739*6dbdd20aSAndroid Build Coastguard Worker continue;
740*6dbdd20aSAndroid Build Coastguard Worker
741*6dbdd20aSAndroid Build Coastguard Worker // If we miss the next chunk, stop looking in the current sequence and
742*6dbdd20aSAndroid Build Coastguard Worker // try another sequence. This chunk might come in the near future.
743*6dbdd20aSAndroid Build Coastguard Worker // The second condition is the edge case of a buggy/malicious
744*6dbdd20aSAndroid Build Coastguard Worker // producer. The ChunkID is contiguous but its flags don't make sense.
745*6dbdd20aSAndroid Build Coastguard Worker if (it.chunk_id() != next_chunk_id ||
746*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_UNLIKELY(
747*6dbdd20aSAndroid Build Coastguard Worker !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
748*6dbdd20aSAndroid Build Coastguard Worker return ReadAheadResult::kFailedMoveToNextSequence;
749*6dbdd20aSAndroid Build Coastguard Worker }
750*6dbdd20aSAndroid Build Coastguard Worker
751*6dbdd20aSAndroid Build Coastguard Worker // If the chunk is contiguous but has not been patched yet move to the next
752*6dbdd20aSAndroid Build Coastguard Worker // sequence and try coming back here on the next ReadNextTracePacket() call.
753*6dbdd20aSAndroid Build Coastguard Worker // TODO(primiano): add a test to cover this, it's a subtle case.
754*6dbdd20aSAndroid Build Coastguard Worker if ((*it).flags & kChunkNeedsPatching)
755*6dbdd20aSAndroid Build Coastguard Worker return ReadAheadResult::kFailedMoveToNextSequence;
756*6dbdd20aSAndroid Build Coastguard Worker
757*6dbdd20aSAndroid Build Coastguard Worker // This is the case of an intermediate chunk which contains only one
758*6dbdd20aSAndroid Build Coastguard Worker // fragment which continues on the next chunk. This is the case for large
759*6dbdd20aSAndroid Build Coastguard Worker // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
760*6dbdd20aSAndroid Build Coastguard Worker // (Packet1(X) := fragment X of Packet1).
761*6dbdd20aSAndroid Build Coastguard Worker if ((*it).num_fragments == 1 &&
762*6dbdd20aSAndroid Build Coastguard Worker ((*it).flags & kLastPacketContinuesOnNextChunk)) {
763*6dbdd20aSAndroid Build Coastguard Worker continue;
764*6dbdd20aSAndroid Build Coastguard Worker }
765*6dbdd20aSAndroid Build Coastguard Worker
766*6dbdd20aSAndroid Build Coastguard Worker // We made it! We got all fragments for the packet without holes.
767*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG(" readahead success @ chunk %u", it.chunk_id());
768*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(((*it).num_fragments == 1 &&
769*6dbdd20aSAndroid Build Coastguard Worker !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
770*6dbdd20aSAndroid Build Coastguard Worker (*it).num_fragments > 1);
771*6dbdd20aSAndroid Build Coastguard Worker
772*6dbdd20aSAndroid Build Coastguard Worker // Now let's re-iterate over the [read_iter_, it] sequence and mark
773*6dbdd20aSAndroid Build Coastguard Worker // all the fragments as read.
774*6dbdd20aSAndroid Build Coastguard Worker bool packet_corruption = false;
775*6dbdd20aSAndroid Build Coastguard Worker for (;;) {
776*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(read_iter_.is_valid());
777*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG(" commit chunk %u", read_iter_.chunk_id());
778*6dbdd20aSAndroid Build Coastguard Worker if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
779*6dbdd20aSAndroid Build Coastguard Worker // In the unlikely case of a corrupted packet (corrupted or empty
780*6dbdd20aSAndroid Build Coastguard Worker // fragment), invalidate the all stitching and move on to the next chunk
781*6dbdd20aSAndroid Build Coastguard Worker // in the same sequence, if any.
782*6dbdd20aSAndroid Build Coastguard Worker auto pw_id = MkProducerAndWriterID(it.producer_id(), it.writer_id());
783*6dbdd20aSAndroid Build Coastguard Worker packet_corruption |=
784*6dbdd20aSAndroid Build Coastguard Worker ReadNextPacketInChunk(pw_id, &*read_iter_, packet) ==
785*6dbdd20aSAndroid Build Coastguard Worker ReadPacketResult::kFailedInvalidPacket;
786*6dbdd20aSAndroid Build Coastguard Worker }
787*6dbdd20aSAndroid Build Coastguard Worker if (read_iter_.cur == it.cur)
788*6dbdd20aSAndroid Build Coastguard Worker break;
789*6dbdd20aSAndroid Build Coastguard Worker read_iter_.MoveNext();
790*6dbdd20aSAndroid Build Coastguard Worker } // for(;;)
791*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(read_iter_.cur == it.cur);
792*6dbdd20aSAndroid Build Coastguard Worker
793*6dbdd20aSAndroid Build Coastguard Worker if (PERFETTO_UNLIKELY(packet_corruption)) {
794*6dbdd20aSAndroid Build Coastguard Worker // ReadNextPacketInChunk() already records an abi violation for this case.
795*6dbdd20aSAndroid Build Coastguard Worker *packet = TracePacket(); // clear.
796*6dbdd20aSAndroid Build Coastguard Worker return ReadAheadResult::kFailedStayOnSameSequence;
797*6dbdd20aSAndroid Build Coastguard Worker }
798*6dbdd20aSAndroid Build Coastguard Worker
799*6dbdd20aSAndroid Build Coastguard Worker return ReadAheadResult::kSucceededReturnSlices;
800*6dbdd20aSAndroid Build Coastguard Worker } // for(it...) [readahead loop]
801*6dbdd20aSAndroid Build Coastguard Worker return ReadAheadResult::kFailedMoveToNextSequence;
802*6dbdd20aSAndroid Build Coastguard Worker }
803*6dbdd20aSAndroid Build Coastguard Worker
// Reads the next packet fragment from the chunk described by |chunk_meta| and
// advances the chunk's read cursor (|cur_fragment_offset| and
// |num_fragments_read|). The fragment's bytes are appended as a slice to
// |packet| (if non-null). All size/offset fields come from the producer via
// shared memory and are therefore untrusted: every dereference is preceded by
// an explicit bounds check, and a violation poisons the remaining fragments of
// the chunk (marking them all as read) rather than trusting any further data.
//
// Returns:
// - kSucceeded: a fragment was read and added to |packet|.
// - kFailedEmptyPacket: a zero-length fragment was read (cursor still
//   advances); the caller should skip it.
// - kFailedInvalidPacket: the chunk payload is malformed; the whole chunk has
//   been marked as fully read and should be abandoned.
TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ProducerAndWriterID producer_and_writer_id,
    ChunkMeta* const chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  // Rebuild the pointers into the buffer from the chunk's record offset.
  const uint8_t* record_begin = begin() + chunk_meta->record_off;
  DcheckIsAlignedAndWithinBounds(record_begin);
  auto* chunk_record = reinterpret_cast<const ChunkRecord*>(record_begin);
  const uint8_t* record_end = record_begin + chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and did declare that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    // Poison the chunk: reset the cursor and pretend all fragments were read,
    // so no further parsing is attempted on this chunk.
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding)
  uint64_t packet_size = 0;
  // Clamp the varint parse window to the record end so a truncated header
  // cannot cause a read past the chunk.
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  // |next_packet <= packet_begin| catches both a failed varint parse
  // (packet_data == packet_begin, size == 0) and pointer overflow.
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
    // packet by writing an invalid size in the last fragment's header. We
    // should handle this case without recording an ABI violation (since Android
    // R).
    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    } else {
      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                          1);
    }
    // Poison the chunk as above: no further fragments will be parsed from it.
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // Advance the chunk's read cursor past this fragment.
  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    // Last fragment of a complete chunk: account the whole chunk as read and
    // record how much of the chunk the writer actually used.
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    auto* writer_stats = writer_stats_.Insert(producer_and_writer_id, {}).first;
    writer_stats->used_chunk_hist.Add(chunk_meta->cur_fragment_offset);
  } else {
    // We have at least one more packet to parse. It should be within the chunk.
    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
        chunk_record->size) {
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    }
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}
895*6dbdd20aSAndroid Build Coastguard Worker
DiscardWrite()896*6dbdd20aSAndroid Build Coastguard Worker void TraceBuffer::DiscardWrite() {
897*6dbdd20aSAndroid Build Coastguard Worker PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
898*6dbdd20aSAndroid Build Coastguard Worker discard_writes_ = true;
899*6dbdd20aSAndroid Build Coastguard Worker stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
900*6dbdd20aSAndroid Build Coastguard Worker TRACE_BUFFER_DLOG(" discarding write");
901*6dbdd20aSAndroid Build Coastguard Worker }
902*6dbdd20aSAndroid Build Coastguard Worker
CloneReadOnly() const903*6dbdd20aSAndroid Build Coastguard Worker std::unique_ptr<TraceBuffer> TraceBuffer::CloneReadOnly() const {
904*6dbdd20aSAndroid Build Coastguard Worker std::unique_ptr<TraceBuffer> buf(new TraceBuffer(CloneCtor(), *this));
905*6dbdd20aSAndroid Build Coastguard Worker if (!buf->data_.IsValid())
906*6dbdd20aSAndroid Build Coastguard Worker return nullptr; // PagedMemory::Allocate() failed. We are out of memory.
907*6dbdd20aSAndroid Build Coastguard Worker return buf;
908*6dbdd20aSAndroid Build Coastguard Worker }
909*6dbdd20aSAndroid Build Coastguard Worker
TraceBuffer(CloneCtor,const TraceBuffer & src)910*6dbdd20aSAndroid Build Coastguard Worker TraceBuffer::TraceBuffer(CloneCtor, const TraceBuffer& src)
911*6dbdd20aSAndroid Build Coastguard Worker : overwrite_policy_(src.overwrite_policy_),
912*6dbdd20aSAndroid Build Coastguard Worker read_only_(true),
913*6dbdd20aSAndroid Build Coastguard Worker discard_writes_(src.discard_writes_) {
914*6dbdd20aSAndroid Build Coastguard Worker if (!Initialize(src.data_.size()))
915*6dbdd20aSAndroid Build Coastguard Worker return; // TraceBuffer::Clone() will check |data_| and return nullptr.
916*6dbdd20aSAndroid Build Coastguard Worker
917*6dbdd20aSAndroid Build Coastguard Worker // The assignments below must be done after Initialize().
918*6dbdd20aSAndroid Build Coastguard Worker
919*6dbdd20aSAndroid Build Coastguard Worker EnsureCommitted(src.used_size_);
920*6dbdd20aSAndroid Build Coastguard Worker memcpy(data_.Get(), src.data_.Get(), src.used_size_);
921*6dbdd20aSAndroid Build Coastguard Worker last_chunk_id_written_ = src.last_chunk_id_written_;
922*6dbdd20aSAndroid Build Coastguard Worker
923*6dbdd20aSAndroid Build Coastguard Worker stats_ = src.stats_;
924*6dbdd20aSAndroid Build Coastguard Worker stats_.set_bytes_read(0);
925*6dbdd20aSAndroid Build Coastguard Worker stats_.set_chunks_read(0);
926*6dbdd20aSAndroid Build Coastguard Worker stats_.set_readaheads_failed(0);
927*6dbdd20aSAndroid Build Coastguard Worker stats_.set_readaheads_succeeded(0);
928*6dbdd20aSAndroid Build Coastguard Worker
929*6dbdd20aSAndroid Build Coastguard Worker // Copy the index of chunk metadata and reset the read states.
930*6dbdd20aSAndroid Build Coastguard Worker index_ = ChunkMap(src.index_);
931*6dbdd20aSAndroid Build Coastguard Worker for (auto& kv : index_) {
932*6dbdd20aSAndroid Build Coastguard Worker ChunkMeta& chunk_meta = kv.second;
933*6dbdd20aSAndroid Build Coastguard Worker chunk_meta.num_fragments_read = 0;
934*6dbdd20aSAndroid Build Coastguard Worker chunk_meta.cur_fragment_offset = 0;
935*6dbdd20aSAndroid Build Coastguard Worker chunk_meta.set_last_read_packet_skipped(false);
936*6dbdd20aSAndroid Build Coastguard Worker }
937*6dbdd20aSAndroid Build Coastguard Worker read_iter_ = SequenceIterator();
938*6dbdd20aSAndroid Build Coastguard Worker }
939*6dbdd20aSAndroid Build Coastguard Worker
940*6dbdd20aSAndroid Build Coastguard Worker } // namespace perfetto
941