1 /*
2 * Copyright (C) 2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "stats_buffer_writer_queue.h"

#include <private/android_filesystem_config.h>
#include <pthread.h>
#include <unistd.h>

#include <chrono>
#include <cstdlib>
#include <cstring>
#include <queue>
#include <thread>

#include "stats_buffer_writer_impl.h"
#include "stats_buffer_writer_queue_impl.h"
#include "utils.h"
29
namespace {
// Atom id 240 - boot time event elapsed time atom, logged by the short-lived
// bootstats process (see should_write_via_queue() below for why it bypasses
// the queue).
constexpr int32_t kBootTimeEventElapsedTimeAtomId = 240;
}  // namespace
33
BufferWriterQueue()34 BufferWriterQueue::BufferWriterQueue() : mWorkThread(&BufferWriterQueue::processCommands, this) {
35 pthread_setname_np(mWorkThread.native_handle(), "socket_writer_queue");
36 }
37
BufferWriterQueue::~BufferWriterQueue() {
    // Stop and join the worker thread first, so no other thread touches the
    // queue while it is being drained below.
    terminate();
    // at this stage there can be N elements in the queue for which memory needs to be freed
    // explicitly
    drainQueue();
}
44
write(const uint8_t * buffer,size_t size,uint32_t atomId)45 bool BufferWriterQueue::write(const uint8_t* buffer, size_t size, uint32_t atomId) {
46 Cmd cmd = createWriteBufferCmd(buffer, size, atomId);
47 if (cmd.buffer == NULL) {
48 return false;
49 }
50 return pushToQueue(cmd);
51 }
52
getQueueSize() const53 size_t BufferWriterQueue::getQueueSize() const {
54 std::unique_lock<std::mutex> lock(mMutex);
55 return mCmdQueue.size();
56 }
57
pushToQueue(const Cmd & cmd)58 bool BufferWriterQueue::pushToQueue(const Cmd& cmd) {
59 {
60 std::unique_lock<std::mutex> lock(mMutex);
61 if (mCmdQueue.size() >= kQueueMaxSizeLimit) {
62 // TODO (b/258003151): add logging info about internal queue overflow with appropriate
63 // error code
64 return false;
65 }
66 mCmdQueue.push(cmd);
67 }
68 mCondition.notify_one();
69 return true;
70 }
71
createWriteBufferCmd(const uint8_t * buffer,size_t size,uint32_t atomId)72 BufferWriterQueue::Cmd BufferWriterQueue::createWriteBufferCmd(const uint8_t* buffer, size_t size,
73 uint32_t atomId) {
74 BufferWriterQueue::Cmd writeCmd;
75 writeCmd.atomId = atomId;
76 writeCmd.buffer = (uint8_t*)malloc(size);
77 if (writeCmd.buffer == NULL) {
78 return writeCmd;
79 }
80 memcpy(writeCmd.buffer, buffer, size);
81 writeCmd.size = size;
82 return writeCmd;
83 }
84
// Requests the worker thread to stop and joins it. Safe to call when the
// thread has already been joined (joinable() returns false then).
void BufferWriterQueue::terminate() {
    if (mWorkThread.joinable()) {
        mDoTerminate = true;
        // A Cmd with a null buffer is the sentinel processCommands() treats as
        // a termination request.
        Cmd terminateCmd;
        terminateCmd.buffer = NULL;
        // NOTE(review): if the queue is at kQueueMaxSizeLimit this push fails
        // and the sentinel is dropped; the worker then exits via its
        // mDoTerminate check after handling a pending command - confirm this
        // fallback is intended.
        pushToQueue(terminateCmd);
        mWorkThread.join();
    }
}
94
drainQueue()95 void BufferWriterQueue::drainQueue() {
96 std::unique_lock<std::mutex> lock(mMutex);
97 while (!mCmdQueue.empty()) {
98 free(mCmdQueue.front().buffer);
99 mCmdQueue.pop();
100 }
101 }
102
// Worker-thread loop: repeatedly takes the front command and attempts to
// write it to the statsd socket, retrying failed writes with a delay.
// Exits on the null-buffer sentinel or when mDoTerminate is set.
void BufferWriterQueue::processCommands() {
    while (true) {
        // temporary local thread copy
        Cmd cmd;
        {
            std::unique_lock<std::mutex> lock(mMutex);
            if (mCmdQueue.empty()) {
                mCondition.wait(lock, [this] { return !this->mCmdQueue.empty(); });
            }
            // Copy (not pop) the front command: it stays in the queue until
            // the write succeeds, so a failed write is retried next iteration.
            cmd = mCmdQueue.front();
        }

        if (cmd.buffer == NULL) {
            // null buffer ptr used as a marker of the termination request
            return;
        }

        const bool writeSuccess = handleCommand(cmd);
        if (writeSuccess) {
            // no event drop is observed otherwise command remains in the queue
            // and worker thread will try to log later on

            // call free() explicitly here to free memory before the mutex lock
            free(cmd.buffer);
            {
                std::unique_lock<std::mutex> lock(mMutex);
                // Pop the queued copy. Its buffer pointer now dangles (it was
                // freed above); the pop must not free it again - Cmd's
                // destructor is expected not to release the buffer
                // (declared in the header; TODO confirm).
                mCmdQueue.pop();
            }
        }
        // TODO (b/258003151): add logging info about retry count

        if (mDoTerminate) {
            return;
        }

        // attempt to enforce the logging frequency constraints
        // in case of failed write due to socket overflow the sleep can be longer
        // to not overload socket continuously
        if (!writeSuccess) {
            std::this_thread::sleep_for(std::chrono::milliseconds(kDelayOnFailedWriteMs));
        }
    }
}
148
handleCommand(const Cmd & cmd) const149 bool BufferWriterQueue::handleCommand(const Cmd& cmd) const {
150 // skip log drop if occurs, since the atom remains in the queue and write will be retried
151 return write_buffer_to_statsd_impl(cmd.buffer, cmd.size, cmd.atomId, /*doNoteDrop*/ false) > 0;
152 }
153
write_buffer_to_statsd_queue(const uint8_t * buffer,size_t size,uint32_t atomId)154 bool write_buffer_to_statsd_queue(const uint8_t* buffer, size_t size, uint32_t atomId) {
155 static BufferWriterQueue queue;
156 return queue.write(buffer, size, atomId);
157 }
158
should_write_via_queue(uint32_t atomId)159 bool should_write_via_queue(uint32_t atomId) {
160 // bootstats is very short living process - queue does not have sufficient
161 // time to be drained entirely so writing this atom straight to socket
162 if (atomId == kBootTimeEventElapsedTimeAtomId) {
163 return false;
164 }
165
166 const uint32_t appUid = getuid();
167
168 // hard-coded push all system server atoms to queue
169 if (appUid == AID_SYSTEM) {
170 return true;
171 }
172
173 #ifdef ENABLE_BENCHMARK_SUPPORT
174 // some hand-picked atoms to be pushed into the queue
175 switch (atomId) {
176 case 47: // APP_BREADCRUMB_REPORTED for statsd_benchmark purpose
177 return true;
178 default:
179 return false;
180 }
181 #endif // ENABLE_BENCHMARK_SUPPORT
182 return false;
183 }
184