/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/tracing/core/shared_memory_abi.h"

#include "perfetto/ext/tracing/core/basic_types.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "test/gtest_and_gmock.h"

namespace perfetto {
namespace {

using testing::ValuesIn;
using Chunk = SharedMemoryABI::Chunk;
using ChunkHeader = SharedMemoryABI::ChunkHeader;

using SharedMemoryABITest = AlignedBufferTest;

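// Exercise the ABI with a range of page sizes, from 4 KB to 64 KB.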
size_t const kPageSizes[] = {4096, 8192, 16384, 32768, 65536};
INSTANTIATE_TEST_SUITE_P(PageSize, SharedMemoryABITest, ValuesIn(kPageSizes));

TEST_P(SharedMemoryABITest, NominalCases) {
  SharedMemoryABI abi(buf(), buf_size(), page_size(),
                      SharedMemoryABI::ShmemMode::kDefault);

  ASSERT_EQ(buf(), abi.start());
  ASSERT_EQ(buf() + buf_size(), abi.end());
  ASSERT_EQ(buf_size(), abi.size());
  ASSERT_EQ(page_size(), abi.page_size());
  ASSERT_EQ(kNumPages, abi.num_pages());

  for (size_t i = 0; i < kNumPages; i++) {
    ASSERT_TRUE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
    // GetFreeChunks() should return 0 for an unpartitioned page.
    ASSERT_EQ(0u, abi.GetFreeChunks(i));
  }

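  // Partition pages 0-4 with increasingly fine chunk divisions. GetFreeChunks()
  // returns a bitmap with one bit set for each free chunk in the page.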
  ASSERT_TRUE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv1));
  ASSERT_EQ(0x01u, abi.GetFreeChunks(0));

  ASSERT_TRUE(abi.TryPartitionPage(1, SharedMemoryABI::kPageDiv2));
  ASSERT_EQ(0x03u, abi.GetFreeChunks(1));

  ASSERT_TRUE(abi.TryPartitionPage(2, SharedMemoryABI::kPageDiv4));
  ASSERT_EQ(0x0fu, abi.GetFreeChunks(2));

  ASSERT_TRUE(abi.TryPartitionPage(3, SharedMemoryABI::kPageDiv7));
  ASSERT_EQ(0x7fu, abi.GetFreeChunks(3));

  ASSERT_TRUE(abi.TryPartitionPage(4, SharedMemoryABI::kPageDiv14));
  ASSERT_EQ(0x3fffu, abi.GetFreeChunks(4));

  // Repartitioning an existing page must fail.
  ASSERT_FALSE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv1));
  ASSERT_FALSE(abi.TryPartitionPage(4, SharedMemoryABI::kPageDiv14));

  for (size_t i = 0; i <= 4; i++) {
    ASSERT_FALSE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
  }

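  // Track the ids and bounds of the previously acquired chunk, so we can check
  // that consecutive chunks are laid out in order and don't overlap.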
  uint16_t last_chunk_id = 0;
  uint16_t last_writer_id = 0;
  uint8_t* last_chunk_begin = nullptr;
  uint8_t* last_chunk_end = nullptr;

  for (size_t page_idx = 0; page_idx <= 4; page_idx++) {
    uint8_t* const page_start = buf() + page_idx * page_size();
    uint8_t* const page_end = page_start + page_size();
    const size_t num_chunks = SharedMemoryABI::GetNumChunksFromHeaderBitmap(
        abi.GetPageHeaderBitmap(page_idx));
    Chunk chunks[14];

    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];
      ChunkHeader header{};

      ASSERT_EQ(SharedMemoryABI::kChunkFree,
                abi.GetChunkState(page_idx, chunk_idx));
      uint16_t chunk_id = ++last_chunk_id;
      last_writer_id = (last_writer_id + 1) & kMaxWriterID;
      uint16_t writer_id = last_writer_id;
      header.chunk_id.store(chunk_id);
      header.writer_id.store(writer_id);

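      // The packets word packs the packet count together with 6 bits of flags.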
      uint16_t packets_count = static_cast<uint16_t>(chunk_idx * 10);
      const uint8_t kFlagsMask = (1 << 6) - 1;
      uint8_t flags = static_cast<uint8_t>((0xffu - chunk_idx) & kFlagsMask);
      header.packets.store({packets_count, flags});

      chunk = abi.TryAcquireChunkForWriting(page_idx, chunk_idx, &header);
      ASSERT_TRUE(chunk.is_valid());
      ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
                abi.GetChunkState(page_idx, chunk_idx));

      // Check chunk bounds.
      size_t expected_chunk_size =
          (page_size() - sizeof(SharedMemoryABI::PageHeader)) / num_chunks;
      expected_chunk_size = expected_chunk_size - (expected_chunk_size % 4);
      ASSERT_EQ(expected_chunk_size, chunk.size());
      ASSERT_EQ(expected_chunk_size - sizeof(SharedMemoryABI::ChunkHeader),
                chunk.payload_size());
      ASSERT_GT(chunk.begin(), page_start);
      ASSERT_GT(chunk.begin(), last_chunk_begin);
      ASSERT_GE(chunk.begin(), last_chunk_end);
      ASSERT_LE(chunk.end(), page_end);
      ASSERT_GT(chunk.end(), chunk.begin());
      ASSERT_EQ(chunk.end(), chunk.begin() + chunk.size());
      last_chunk_begin = chunk.begin();
      last_chunk_end = chunk.end();

      ASSERT_EQ(chunk_id, chunk.header()->chunk_id.load());
      ASSERT_EQ(writer_id, chunk.header()->writer_id.load());
      ASSERT_EQ(packets_count, chunk.header()->packets.load().count);
      ASSERT_EQ(flags, chunk.header()->packets.load().flags);
      ASSERT_EQ(std::make_pair(packets_count, flags),
                chunk.GetPacketCountAndFlags());

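      // Each IncrementPacketCount() call should bump the stored count by one.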
      chunk.IncrementPacketCount();
      ASSERT_EQ(packets_count + 1, chunk.header()->packets.load().count);

      chunk.IncrementPacketCount();
      ASSERT_EQ(packets_count + 2, chunk.header()->packets.load().count);

      chunk.SetFlag(
          SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
      ASSERT_TRUE(
          chunk.header()->packets.load().flags &
          SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);

      // Test clearing the needs patching flag.
      chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
      ASSERT_TRUE(chunk.header()->packets.load().flags &
                  SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
      chunk.ClearNeedsPatchingFlag();
      ASSERT_FALSE(chunk.header()->packets.load().flags &
                   SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);

      // Reacquiring the same chunk should fail.
      ASSERT_FALSE(abi.TryAcquireChunkForWriting(page_idx, chunk_idx, &header)
                       .is_valid());
    }

    // Now release chunks and check the Release() logic.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];

      size_t res = abi.ReleaseChunkAsComplete(std::move(chunk));
      ASSERT_EQ(page_idx, res);
      ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_complete(page_idx));
      ASSERT_EQ(SharedMemoryABI::kChunkComplete,
                abi.GetChunkState(page_idx, chunk_idx));
    }

    // Now acquire all chunks for reading.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];
      chunk = abi.TryAcquireChunkForReading(page_idx, chunk_idx);
      ASSERT_TRUE(chunk.is_valid());
      ASSERT_EQ(SharedMemoryABI::kChunkBeingRead,
                abi.GetChunkState(page_idx, chunk_idx));
    }

    // Finally release all chunks as free.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];

      // If this was the last chunk in the page, the full page should be marked
      // as free.
      size_t res = abi.ReleaseChunkAsFree(std::move(chunk));
      ASSERT_EQ(page_idx, res);
      ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_free(page_idx));
      ASSERT_EQ(SharedMemoryABI::kChunkFree,
                abi.GetChunkState(page_idx, chunk_idx));
    }
  }
}

// Tests chunk state transitions in shared memory emulation mode.
TEST_P(SharedMemoryABITest, ShmemEmulation) {
  SharedMemoryABI abi(buf(), buf_size(), page_size(),
                      SharedMemoryABI::ShmemMode::kShmemEmulation);

  for (size_t i = 0; i < kNumPages; i++) {
    ASSERT_TRUE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
    // GetFreeChunks() should return 0 for an unpartitioned page.
    ASSERT_EQ(0u, abi.GetFreeChunks(i));
  }

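  // Partition page 0 into 14 chunks: all 14 bits of the free-chunks bitmap
  // should be set.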
  ASSERT_TRUE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv14));
  ASSERT_EQ(0x3fffu, abi.GetFreeChunks(0));

  ASSERT_FALSE(abi.is_page_free(0));

  const size_t num_chunks =
      SharedMemoryABI::GetNumChunksFromHeaderBitmap(abi.GetPageHeaderBitmap(0));
  Chunk chunks[14];

  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk& chunk = chunks[chunk_idx];
    ChunkHeader header{};

    ASSERT_EQ(SharedMemoryABI::kChunkFree, abi.GetChunkState(0, chunk_idx));

    chunk = abi.TryAcquireChunkForWriting(0, chunk_idx, &header);
    ASSERT_TRUE(chunk.is_valid());
    ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
              abi.GetChunkState(0, chunk_idx));

    // Reacquiring the same chunk should fail.
    ASSERT_FALSE(
        abi.TryAcquireChunkForWriting(0, chunk_idx, &header).is_valid());
  }

  // Now release chunks and check the Release() logic.
  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk& chunk = chunks[chunk_idx];

    size_t res = abi.ReleaseChunkAsComplete(std::move(chunk));
    ASSERT_EQ(0u, res);
    ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi.GetChunkState(0, chunk_idx));
  }

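  // Re-grab each chunk via GetChunkUnchecked() and release it back as free.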
  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk chunk =
        abi.GetChunkUnchecked(0, abi.GetPageHeaderBitmap(0), chunk_idx);

    // If this was the last chunk in the page, the full page should be marked
    // as free.
    size_t res = abi.ReleaseChunkAsFree(std::move(chunk));
    ASSERT_EQ(0u, res);
    ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_free(0));
    ASSERT_EQ(SharedMemoryABI::kChunkFree, abi.GetChunkState(0u, chunk_idx));
  }
}

}  // namespace
}  // namespace perfetto