/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/shared_memory_arbiter_impl.h"

#include <bitset>
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/basic_types.h"
#include "perfetto/ext/tracing/core/commit_data_request.h"
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "perfetto/ext/tracing/core/tracing_service.h"
#include "src/base/test/test_task_runner.h"
#include "src/tracing/core/in_process_shared_memory.h"
#include "src/tracing/core/patch_list.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "src/tracing/test/mock_producer_endpoint.h"
#include "test/gtest_and_gmock.h"

#include "protos/perfetto/trace/test_event.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"

namespace perfetto {

using testing::_;
using testing::Between;
using testing::Invoke;
using testing::Mock;
using testing::NiceMock;
using testing::UnorderedElementsAreArray;

using ShmemMode = SharedMemoryABI::ShmemMode;

class SharedMemoryArbiterImplTest : public AlignedBufferTest {
 public:
  void SetUp() override {
    default_layout_ =
        SharedMemoryArbiterImpl::default_page_layout_for_testing();
    AlignedBufferTest::SetUp();
    task_runner_.reset(new base::TestTaskRunner());
    arbiter_.reset(new SharedMemoryArbiterImpl(
        buf(), buf_size(), ShmemMode::kDefault, page_size(),
        &mock_producer_endpoint_, task_runner_.get()));
  }

  bool IsArbiterFullyBound() { return arbiter_->fully_bound_; }

  void TearDown() override {
    arbiter_.reset();
    task_runner_.reset();
    SharedMemoryArbiterImpl::set_default_layout_for_testing(default_layout_);
  }

  std::unique_ptr<base::TestTaskRunner> task_runner_;
  std::unique_ptr<SharedMemoryArbiterImpl> arbiter_;
  NiceMock<MockProducerEndpoint> mock_producer_endpoint_;
  std::function<void(const std::vector<uint32_t>&)> on_pages_complete_;
  SharedMemoryABI::PageLayout default_layout_;
};

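// Run each test with both a 4 KiB and a 64 KiB SMB page size.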
size_t const kPageSizes[] = {4096, 65536};
INSTANTIATE_TEST_SUITE_P(PageSize,
                         SharedMemoryArbiterImplTest,
                         ::testing::ValuesIn(kPageSizes));

// The buffer has 14 pages (kNumPages), each will be partitioned in 14 chunks.
// The test requests 30 chunks (2 full pages + 2 chunks from a 3rd page) and
// releases them in different batches. It tests the consistency of the batches
// and the releasing order.
TEST_P(SharedMemoryArbiterImplTest, GetAndReturnChunks) {
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv14);
  static constexpr size_t kTotChunks = kNumPages * 14;
  SharedMemoryABI::Chunk chunks[kTotChunks];
  for (size_t i = 0; i < 14 * 2 + 2; i++) {
    chunks[i] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kStall);
    ASSERT_TRUE(chunks[i].is_valid());
  }

  // Now return the first 28 chunks (2 full pages) and only the 2nd chunk of
  // the 3rd page. Chunks are released in interleaved order: 1,0,3,2,5,4,7,6.
  // Check that the notification callback is posted and order is consistent.
  auto on_commit_1 = task_runner_->CreateCheckpoint("on_commit_1");
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([on_commit_1](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(14 * 2 + 1, req.chunks_to_move_size());
        for (size_t i = 0; i < 14 * 2; i++) {
          ASSERT_EQ(i / 14, req.chunks_to_move()[i].page());
          ASSERT_EQ((i % 14) ^ 1, req.chunks_to_move()[i].chunk());
          ASSERT_EQ(i % 5 + 1, req.chunks_to_move()[i].target_buffer());
        }
        ASSERT_EQ(2u, req.chunks_to_move()[28].page());
        ASSERT_EQ(1u, req.chunks_to_move()[28].chunk());
        ASSERT_EQ(42u, req.chunks_to_move()[28].target_buffer());
        on_commit_1();
      }));
  PatchList ignored;
  for (size_t i = 0; i < 14 * 2; i++) {
    arbiter_->ReturnCompletedChunk(std::move(chunks[i ^ 1]), i % 5 + 1,
                                   &ignored);
  }
  arbiter_->ReturnCompletedChunk(std::move(chunks[29]), 42, &ignored);
  task_runner_->RunUntilCheckpoint("on_commit_1");

  // Then release the 1st chunk of the 3rd page, and check that we get a
  // notification for that as well.
  auto on_commit_2 = task_runner_->CreateCheckpoint("on_commit_2");
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([on_commit_2](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(1, req.chunks_to_move_size());
        ASSERT_EQ(2u, req.chunks_to_move()[0].page());
        ASSERT_EQ(0u, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(43u, req.chunks_to_move()[0].target_buffer());
        on_commit_2();
      }));
  arbiter_->ReturnCompletedChunk(std::move(chunks[28]), 43, &ignored);
  task_runner_->RunUntilCheckpoint("on_commit_2");
}

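// Checks that chunk commits are batched into a single CommitData request when
// a non-zero batching period is configured on the arbiter.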
TEST_P(SharedMemoryArbiterImplTest, BatchCommits) {
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);

  // Batching period is 0s - chunks are being committed as soon as they are
  // returned.
  SharedMemoryABI::Chunk chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(1);
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunk), 0, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));

  // Since we cannot explicitly control the passage of time in task_runner_, to
  // simulate a non-zero batching period and a commit at the end of it, set the
  // batching duration to a very large value and call
  // FlushPendingCommitDataRequests to manually trigger the commit.
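  // Pretend the service supports direct patching of chunks in the SMB and
  // enable it, so that a chunk which still needs patching can stay in
  // kChunkBeingWritten instead of being marked complete when returned.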
  arbiter_->SetDirectSMBPatchingSupportedByService();
  ASSERT_TRUE(arbiter_->EnableDirectSMBPatching());
  arbiter_->SetBatchCommitsDuration(UINT32_MAX);

  // First chunk that will be batched. CommitData should not be called
  // immediately this time.
  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(0);
  // We'll pretend that the chunk needs patching. This is done in order to
  // verify that chunks that need patching are not marked as complete (i.e.
  // they are kept in state kChunkBeingWritten) before the batching period ends
  // - in case a patch for them arrives during the batching period.
  chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 1, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
            arbiter_->shmem_abi_for_testing()->GetChunkState(1u, 0u));

  // Add a second chunk to the batch. This should also not trigger an immediate
  // call to CommitData.
  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(0);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 2, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // This chunk does not need patching, so it should be marked as complete even
  // before the end of the batching period - to allow the service to read it in
  // full.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete,
            arbiter_->shmem_abi_for_testing()->GetChunkState(2u, 0u));

  // Make sure that CommitData gets called once (should happen at the end
  // of the batching period), with the two chunks in the batch.
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([](const CommitDataRequest& req,
                          MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(2, req.chunks_to_move_size());

        // Verify that this is the first chunk that we expect to have been
        // batched.
        ASSERT_EQ(1u, req.chunks_to_move()[0].page());
        ASSERT_EQ(0u, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(1u, req.chunks_to_move()[0].target_buffer());

        // Verify that this is the second chunk that we expect to have been
        // batched.
        ASSERT_EQ(2u, req.chunks_to_move()[1].page());
        ASSERT_EQ(0u, req.chunks_to_move()[1].chunk());
        ASSERT_EQ(2u, req.chunks_to_move()[1].target_buffer());
      }));

  // Pretend we've reached the end of the batching period.
  arbiter_->FlushPendingCommitDataRequests();
}

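// Checks the arbiter when running in shmem emulation mode: committed chunks
// carry their payload inline in the CommitDataRequest and are freed locally
// once the request is flushed.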
TEST_P(SharedMemoryArbiterImplTest, UseShmemEmulation) {
  arbiter_.reset(new SharedMemoryArbiterImpl(
      buf(), buf_size(), ShmemMode::kShmemEmulation, page_size(),
      &mock_producer_endpoint_, task_runner_.get()));

  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);

  size_t page_idx;
  size_t chunk_idx;
  auto* abi = arbiter_->shmem_abi_for_testing();

  // Test returning a completed chunk.
  SharedMemoryABI::Chunk chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  std::tie(page_idx, chunk_idx) = abi->GetPageAndChunkIndex(chunk);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(1);
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunk), 0, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // When running in the emulation mode, the chunk is freed when the
  // CommitDataRequest is flushed.
  ASSERT_EQ(
      SharedMemoryABI::kChunkFree,
      arbiter_->shmem_abi_for_testing()->GetChunkState(page_idx, chunk_idx));

  // Direct patching is supported in the emulation mode.
  arbiter_->SetDirectSMBPatchingSupportedByService();
  ASSERT_TRUE(arbiter_->EnableDirectSMBPatching());

  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  std::tie(page_idx, chunk_idx) = abi->GetPageAndChunkIndex(chunk);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([&](const CommitDataRequest& req,
                           MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(1, req.chunks_to_move_size());

        ASSERT_EQ(page_idx, req.chunks_to_move()[0].page());
        ASSERT_EQ(chunk_idx, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(1u, req.chunks_to_move()[0].target_buffer());

        // The request should contain chunk data.
        ASSERT_TRUE(req.chunks_to_move()[0].has_data());
      }));
  chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 1, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // A chunk is freed after being flushed.
  ASSERT_EQ(
      SharedMemoryABI::kChunkFree,
      arbiter_->shmem_abi_for_testing()->GetChunkState(page_idx, chunk_idx));
}

// Check that we can actually create up to kMaxWriterID TraceWriter(s).
TEST_P(SharedMemoryArbiterImplTest, WriterIDsAllocation) {
  auto checkpoint = task_runner_->CreateCheckpoint("last_unregistered");

  std::vector<uint32_t> registered_ids;
  std::vector<uint32_t> unregistered_ids;

  ON_CALL(mock_producer_endpoint_, RegisterTraceWriter)
      .WillByDefault(
          [&](uint32_t id, uint32_t) { registered_ids.push_back(id); });
  ON_CALL(mock_producer_endpoint_, UnregisterTraceWriter)
      .WillByDefault([&](uint32_t id) {
        unregistered_ids.push_back(id);
        if (unregistered_ids.size() == kMaxWriterID) {
          checkpoint();
        }
      });
  {
    std::map<WriterID, std::unique_ptr<TraceWriter>> writers;

    for (size_t i = 0; i < kMaxWriterID; i++) {
      std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(1);
      ASSERT_TRUE(writer);
      WriterID writer_id = writer->writer_id();
      ASSERT_TRUE(writers.emplace(writer_id, std::move(writer)).second);
    }

    // A further call should return a null impl of trace writer as we exhausted
    // writer IDs.
    ASSERT_EQ(arbiter_->CreateTraceWriter(1)->writer_id(), 0);
  }

  // This should run the Register/UnregisterTraceWriter tasks enqueued by the
  // memory arbiter.
  task_runner_->RunUntilCheckpoint("last_unregistered", 15000);

  std::vector<uint32_t> expected_ids;  // 1..kMaxWriterID
  for (uint32_t i = 1; i <= kMaxWriterID; i++)
    expected_ids.push_back(i);
  EXPECT_THAT(registered_ids, UnorderedElementsAreArray(expected_ids));
  EXPECT_THAT(unregistered_ids, UnorderedElementsAreArray(expected_ids));
}

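// Checks that TryShutdown() fails while a live TraceWriter exists, that
// writers created after the shutdown attempt are null ones, and that shutdown
// succeeds once the last live writer goes away.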
TEST_P(SharedMemoryArbiterImplTest, Shutdown) {
  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(1);
  EXPECT_TRUE(writer);
  EXPECT_FALSE(arbiter_->TryShutdown());

  // We still get a valid trace writer after shutdown, but it's a null one
  // that's not connected to the arbiter.
  std::unique_ptr<TraceWriter> writer2 = arbiter_->CreateTraceWriter(2);
  EXPECT_TRUE(writer2);
  EXPECT_EQ(writer2->writer_id(), 0);

  // Shutdown will succeed once the only non-null writer goes away.
  writer.reset();
  EXPECT_TRUE(arbiter_->TryShutdown());
}

// Verify that getting a new chunk doesn't stall when kDrop policy is chosen.
TEST_P(SharedMemoryArbiterImplTest, BufferExhaustedPolicyDrop) {
  // Grab all chunks in the SMB.
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);
  static constexpr size_t kTotChunks = kNumPages;
  SharedMemoryABI::Chunk chunks[kTotChunks];
  for (size_t i = 0; i < kTotChunks; i++) {
    chunks[i] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
    ASSERT_TRUE(chunks[i].is_valid());
  }

  // SMB is exhausted, thus GetNewChunk() should return an invalid chunk. In
  // kStall mode, this would stall.
  SharedMemoryABI::Chunk invalid_chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_FALSE(invalid_chunk.is_valid());

  // Returning the chunk is not enough to be able to reacquire it.
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunks[0]), 1, &ignored);

  invalid_chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_FALSE(invalid_chunk.is_valid());

  // After releasing the chunk as free, we can reacquire it.
  chunks[0] =
      arbiter_->shmem_abi_for_testing()->TryAcquireChunkForReading(0, 0);
  ASSERT_TRUE(chunks[0].is_valid());
  arbiter_->shmem_abi_for_testing()->ReleaseChunkAsFree(std::move(chunks[0]));

  chunks[0] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_TRUE(chunks[0].is_valid());
}

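// Checks that an arbiter created unbound can later be bound to a producer
// endpoint and task runner, after which writer registration and commit/flush
// requests go through immediately.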
TEST_P(SharedMemoryArbiterImplTest, CreateUnboundAndBind) {
  auto checkpoint_writer = task_runner_->CreateCheckpoint("writer_registered");
  auto checkpoint_flush = task_runner_->CreateCheckpoint("flush_completed");

  // Create an unbound arbiter and bind immediately.
  arbiter_.reset(new SharedMemoryArbiterImpl(
      buf(), buf_size(), ShmemMode::kDefault, page_size(), nullptr, nullptr));
  arbiter_->BindToProducerEndpoint(&mock_producer_endpoint_,
                                   task_runner_.get());
  EXPECT_TRUE(IsArbiterFullyBound());

  // Trace writer should be registered in a non-delayed task.
  EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 1))
      .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
  std::unique_ptr<TraceWriter> writer =
      arbiter_->CreateTraceWriter(1, BufferExhaustedPolicy::kDrop);
  task_runner_->RunUntilCheckpoint("writer_registered", 5000);

  // Commits/flushes should be sent right away.
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(testing::InvokeArgument<1>());
  writer->Flush(checkpoint_flush);
  task_runner_->RunUntilCheckpoint("flush_completed", 5000);
}

// Startup tracing tests are run with the arbiter in either bound or unbound
// initial state. Startup tracing in bound state can still be useful, e.g. in
// integration tests or to enable tracing in the consumer process immediately
// before/after instructing the service to start a session, avoiding the
// round-trip time through the service.
enum class InitialBindingState { kUnbound, kBound };

class SharedMemoryArbiterImplStartupTracingTest
    : public SharedMemoryArbiterImplTest {
 public:
  void SetupArbiter(InitialBindingState initial_state) {
    if (initial_state == InitialBindingState::kUnbound) {
      arbiter_.reset(
          new SharedMemoryArbiterImpl(buf(), buf_size(), ShmemMode::kDefault,
                                      page_size(), nullptr, nullptr));
      EXPECT_FALSE(IsArbiterFullyBound());
    } else {
      // A bound arbiter is already set up by the base class.
      EXPECT_TRUE(IsArbiterFullyBound());
    }
  }

  void EnsureArbiterBoundToEndpoint(InitialBindingState initial_state) {
    if (initial_state == InitialBindingState::kUnbound) {
      arbiter_->BindToProducerEndpoint(&mock_producer_endpoint_,
                                       task_runner_.get());
    }
  }

  void TestStartupTracing(InitialBindingState initial_state) {
    constexpr uint16_t kTargetBufferReservationId1 = 1;
    constexpr uint16_t kTargetBufferReservationId2 = 2;

    SetupArbiter(initial_state);

    // Create an unbound startup writer.
    std::unique_ptr<TraceWriter> writer =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write two packets while unbound (if InitialBindingState::kUnbound) and
    // flush the chunk after each packet. The writer will return the chunk to
    // the arbiter and grab a new chunk for the second packet. The flush should
    // only add the chunk into the queued commit request.
    for (int i = 0; i < 2; i++) {
      {
        auto packet = writer->NewTracePacket();
        packet->set_for_testing()->set_str("foo");
      }
      writer->Flush();
    }

    // Bind to producer endpoint if initially unbound. This should not register
    // the trace writer yet, because its buffer reservation is still unbound.
    EnsureArbiterBoundToEndpoint(initial_state);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write another packet into another chunk and queue it.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    bool flush_completed = false;
    writer->Flush([&flush_completed] { flush_completed = true; });

    // Bind the buffer reservation to a buffer. Trace writer should be
    // registered and queued commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 42));
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(3, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          EXPECT_EQ(42u, req.chunks_to_move()[1].target_buffer());
          EXPECT_EQ(42u, req.chunks_to_move()[2].target_buffer());
          callback();
        }));

    arbiter_->BindStartupTargetBuffer(kTargetBufferReservationId1, 42);
    EXPECT_TRUE(IsArbiterFullyBound());

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);

    // Creating a new startup writer for the same buffer posts an immediate
    // task to register it.
    auto checkpoint_register1b =
        task_runner_->CreateCheckpoint("writer1b_registered");
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 42))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_register1b));
    std::unique_ptr<TraceWriter> writer1b =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    task_runner_->RunUntilCheckpoint("writer1b_registered", 5000);

    // And a commit on this new writer should be flushed to the right buffer,
    // too.
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(1, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          callback();
        }));
    {
      auto packet = writer1b->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    flush_completed = false;
    writer1b->Flush([&flush_completed] { flush_completed = true; });

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);

    // Create another startup writer for another target buffer, which puts the
    // arbiter back into unbound state.
    std::unique_ptr<TraceWriter> writer2 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId2);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write a chunk into both writers. Both should be queued up into the next
    // commit request.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    writer->Flush();
    {
      auto packet = writer2->NewTracePacket();
      packet->set_for_testing()->set_str("bar");
    }
    flush_completed = false;
    writer2->Flush([&flush_completed] { flush_completed = true; });

    // Destroy the first trace writer, which should cause the arbiter to post a
    // task to unregister it.
    auto checkpoint_writer =
        task_runner_->CreateCheckpoint("writer_unregistered");
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer->writer_id()))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
    writer.reset();
    task_runner_->RunUntilCheckpoint("writer_unregistered", 5000);

    // Bind the second buffer reservation to a buffer. Second trace writer
    // should be registered and queued commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 23));
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(2, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          EXPECT_EQ(23u, req.chunks_to_move()[1].target_buffer());
          callback();
        }));

    arbiter_->BindStartupTargetBuffer(kTargetBufferReservationId2, 23);
    EXPECT_TRUE(IsArbiterFullyBound());

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);
  }

  void TestAbortStartupTracingForReservation(
      InitialBindingState initial_state) {
    constexpr uint16_t kTargetBufferReservationId1 = 1;
    constexpr uint16_t kTargetBufferReservationId2 = 2;

    SetupArbiter(initial_state);

    // Create two unbound startup writers for the same target buffer.
    SharedMemoryABI* shmem_abi = arbiter_->shmem_abi_for_testing();
    std::unique_ptr<TraceWriter> writer =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    std::unique_ptr<TraceWriter> writer2 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);

    // Write two packets while unbound and flush the chunk after each packet.
    // The writer will return the chunk to the arbiter and grab a new chunk for
    // the second packet. The flush should only add the chunk into the queued
    // commit request.
    for (int i = 0; i < 2; i++) {
      {
        auto packet = writer->NewTracePacket();
        packet->set_for_testing()->set_str("foo");
      }
      writer->Flush();
    }

    // Expectations for the below calls.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([shmem_abi](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
          ASSERT_EQ(2, req.chunks_to_move_size());
          for (size_t i = 0; i < 2; i++) {
            EXPECT_EQ(0u, req.chunks_to_move()[i].target_buffer());
            SharedMemoryABI::Chunk chunk = shmem_abi->TryAcquireChunkForReading(
                req.chunks_to_move()[i].page(),
                req.chunks_to_move()[i].chunk());
            shmem_abi->ReleaseChunkAsFree(std::move(chunk));
          }
        }));

    // Abort the first session. This should resolve the two chunks committed up
    // to this point to an invalid target buffer (ID 0). They will remain
    // buffered until bound to an endpoint (if InitialBindingState::kUnbound).
    arbiter_->AbortStartupTracingForReservation(kTargetBufferReservationId1);

    // Destroy a writer that was created before the abort. This should not
    // cause crashes.
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer2->writer_id()))
        .Times(Between(0, 1));  // Depending on `initial_state`.
    writer2.reset();

    // Bind to producer endpoint if unbound. The trace writer should not be
    // registered as its target buffer is invalid. Since no startup sessions
    // are active anymore, the arbiter should be fully bound. The commit data
    // request is flushed.
    EnsureArbiterBoundToEndpoint(initial_state);
    EXPECT_TRUE(IsArbiterFullyBound());

    // SMB should be free again, as no writer holds on to any chunk anymore.
    for (size_t i = 0; i < shmem_abi->num_pages(); i++)
      EXPECT_TRUE(shmem_abi->is_page_free(i));

    // Write another packet into another chunk and commit it. It should be sent
    // to the arbiter with invalid target buffer (ID 0).
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(1, req.chunks_to_move_size());
              EXPECT_EQ(0u, req.chunks_to_move()[0].target_buffer());
              SharedMemoryABI::Chunk chunk =
                  shmem_abi->TryAcquireChunkForReading(
                      req.chunks_to_move()[0].page(),
                      req.chunks_to_move()[0].chunk());
              shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              callback();
            }));
    bool flush_completed = false;
    writer->Flush([&flush_completed] { flush_completed = true; });
    EXPECT_TRUE(flush_completed);

    // Creating a new startup writer for the same buffer does not cause it to
    // register.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    std::unique_ptr<TraceWriter> writer1b =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);

    // And a commit on this new writer should again be flushed to the invalid
    // target buffer.
    {
      auto packet = writer1b->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(1, req.chunks_to_move_size());
              EXPECT_EQ(0u, req.chunks_to_move()[0].target_buffer());
              SharedMemoryABI::Chunk chunk =
                  shmem_abi->TryAcquireChunkForReading(
                      req.chunks_to_move()[0].page(),
                      req.chunks_to_move()[0].chunk());
              shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              callback();
            }));
    flush_completed = false;
    writer1b->Flush([&flush_completed] { flush_completed = true; });
    EXPECT_TRUE(flush_completed);

    // Create another startup writer for another target buffer, which puts the
    // arbiter back into unbound state.
    std::unique_ptr<TraceWriter> writer3 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId2);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write a chunk into both writers. Both should be queued up into the next
    // commit request.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    writer->Flush();
    {
      auto packet = writer3->NewTracePacket();
      packet->set_for_testing()->set_str("bar");
    }
    flush_completed = false;
    writer3->Flush([&flush_completed] { flush_completed = true; });

    // Destroy the first trace writer, which should cause the arbiter to post a
    // task to unregister it.
    auto checkpoint_writer =
        task_runner_->CreateCheckpoint("writer_unregistered");
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer->writer_id()))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
    writer.reset();
    task_runner_->RunUntilCheckpoint("writer_unregistered", 5000);

    // Abort the second session. Its commits should now also be associated with
    // target buffer 0, and both writers' commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(2, req.chunks_to_move_size());
              for (size_t i = 0; i < 2; i++) {
                EXPECT_EQ(0u, req.chunks_to_move()[i].target_buffer());
                SharedMemoryABI::Chunk chunk =
                    shmem_abi->TryAcquireChunkForReading(
                        req.chunks_to_move()[i].page(),
                        req.chunks_to_move()[i].chunk());
                shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              }
              callback();
            }));

    arbiter_->AbortStartupTracingForReservation(kTargetBufferReservationId2);
    EXPECT_TRUE(IsArbiterFullyBound());
    EXPECT_TRUE(flush_completed);

    // SMB should be free again, as no writer holds on to any chunk anymore.
    for (size_t i = 0; i < shmem_abi->num_pages(); i++)
      EXPECT_TRUE(shmem_abi->is_page_free(i));
  }
};

INSTANTIATE_TEST_SUITE_P(PageSize,
                         SharedMemoryArbiterImplStartupTracingTest,
                         ::testing::ValuesIn(kPageSizes));

TEST_P(SharedMemoryArbiterImplStartupTracingTest, StartupTracingUnbound) {
  TestStartupTracing(InitialBindingState::kUnbound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest, StartupTracingBound) {
  TestStartupTracing(InitialBindingState::kBound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest,
       AbortStartupTracingForReservationUnbound) {
  TestAbortStartupTracingForReservation(InitialBindingState::kUnbound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest,
       AbortStartupTracingForReservationBound) {
  TestAbortStartupTracingForReservation(InitialBindingState::kBound);
}

// TODO(primiano): add multi-threaded tests.

}  // namespace perfetto