1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef __ANDROID_VENDOR__
18 // only used in NDK tests outside of vendor
19 #include <aidl/IBinderRpcTest.h>
20 #endif
21
22 #if defined(__LP64__)
23 #define TEST_FILE_SUFFIX "64"
24 #else
25 #define TEST_FILE_SUFFIX "32"
26 #endif
27
28 #include <chrono>
29 #include <cstdlib>
30 #include <iostream>
31 #include <thread>
32 #include <type_traits>
33
34 #include <dirent.h>
35 #include <dlfcn.h>
36 #include <poll.h>
37 #include <sys/prctl.h>
38 #include <sys/socket.h>
39
40 #ifdef BINDER_RPC_TO_TRUSTY_TEST
41 #include <binder/RpcTransportTipcAndroid.h>
42 #include <trusty/tipc.h>
43 #endif // BINDER_RPC_TO_TRUSTY_TEST
44
45 #include "../Utils.h"
46 #include "binderRpcTestCommon.h"
47 #include "binderRpcTestFixture.h"
48
49 // TODO need to add IServiceManager.cpp/.h to libbinder_no_kernel
50 #ifdef BINDER_WITH_KERNEL_IPC
51 #include "android-base/logging.h"
52 #include "android/binder_manager.h"
53 #include "android/binder_rpc.h"
54 #endif // BINDER_WITH_KERNEL_IPC
55
56 using namespace std::chrono_literals;
57 using namespace std::placeholders;
58 using android::binder::borrowed_fd;
59 using android::binder::GetExecutableDirectory;
60 using android::binder::ReadFdToString;
61 using android::binder::unique_fd;
62 using testing::AssertionFailure;
63 using testing::AssertionResult;
64 using testing::AssertionSuccess;
65
66 namespace android {
67
68 #ifdef BINDER_TEST_NO_SHARED_LIBS
69 constexpr bool kEnableSharedLibs = false;
70 #else
71 constexpr bool kEnableSharedLibs = true;
72 #endif
73
74 #ifdef BINDER_RPC_TO_TRUSTY_TEST
75 constexpr char kTrustyIpcDevice[] = "/dev/trusty-ipc-dev0";
76 #endif
77
78 constexpr char kKnownAidlService[] = "activity";
79
80 static std::string WaitStatusToString(int wstatus) {
81 if (WIFEXITED(wstatus)) {
82 return "exit status " + std::to_string(WEXITSTATUS(wstatus));
83 }
84 if (WIFSIGNALED(wstatus)) {
85 return "term signal " + std::to_string(WTERMSIG(wstatus));
86 }
87 return "unexpected state " + std::to_string(wstatus);
88 }
89
90 static void debugBacktrace(pid_t pid) {
91 std::cerr << "TAKING BACKTRACE FOR PID " << pid << std::endl;
92 system((std::string("debuggerd -b ") + std::to_string(pid)).c_str());
93 }
94
95 class Process {
96 public:
97     Process(Process&& other)
98 : mCustomExitStatusCheck(std::move(other.mCustomExitStatusCheck)),
99 mReadEnd(std::move(other.mReadEnd)),
100 mWriteEnd(std::move(other.mWriteEnd)) {
101 // The default move constructor doesn't clear mPid after moving it,
102 // which we need to do because the destructor checks for mPid!=0
103 mPid = other.mPid;
104 other.mPid = 0;
105 }
106     Process(const std::function<void(borrowed_fd /* writeEnd */, borrowed_fd /* readEnd */)>& f) {
107 unique_fd childWriteEnd;
108 unique_fd childReadEnd;
109 if (!binder::Pipe(&mReadEnd, &childWriteEnd, 0)) PLOGF("child write pipe failed");
110 if (!binder::Pipe(&childReadEnd, &mWriteEnd, 0)) PLOGF("child read pipe failed");
111 if (0 == (mPid = fork())) {
112 // racey: assume parent doesn't crash before this is set
113 prctl(PR_SET_PDEATHSIG, SIGHUP);
114
115 f(childWriteEnd, childReadEnd);
116
117 exit(0);
118 }
119 }
120     ~Process() {
121 if (mPid != 0) {
122 int wstatus;
123 waitpid(mPid, &wstatus, 0);
124 if (mCustomExitStatusCheck) {
125 mCustomExitStatusCheck(wstatus);
126 } else {
127 EXPECT_TRUE(WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0)
128 << "server process failed: " << WaitStatusToString(wstatus);
129 }
130 }
131 }
132     borrowed_fd readEnd() { return mReadEnd; }
133     borrowed_fd writeEnd() { return mWriteEnd; }
134
135     void setCustomExitStatusCheck(std::function<void(int wstatus)> f) {
136 mCustomExitStatusCheck = std::move(f);
137 }
138
139     // Kill the process. Avoid if possible. Shut down gracefully via an RPC instead.
140     void terminate() { kill(mPid, SIGTERM); }
141
142     pid_t getPid() { return mPid; }
143
144 private:
145 std::function<void(int wstatus)> mCustomExitStatusCheck;
146 pid_t mPid = 0;
147 unique_fd mReadEnd;
148 unique_fd mWriteEnd;
149 };
150
151 static std::string allocateSocketAddress() {
152 static size_t id = 0;
153 std::string temp = getenv("TMPDIR") ?: "/tmp";
154 auto ret = temp + "/binderRpcTest_" + std::to_string(getpid()) + "_" + std::to_string(id++);
155 unlink(ret.c_str());
156 return ret;
157 };
158
159 static unique_fd initUnixSocket(std::string addr) {
160 auto socket_addr = UnixSocketAddress(addr.c_str());
161 unique_fd fd(TEMP_FAILURE_RETRY(socket(socket_addr.addr()->sa_family, SOCK_STREAM, AF_UNIX)));
162 if (!fd.ok()) PLOGF("initUnixSocket failed to create socket");
163 if (0 != TEMP_FAILURE_RETRY(bind(fd.get(), socket_addr.addr(), socket_addr.addrSize()))) {
164 PLOGF("initUnixSocket failed to bind");
165 }
166 return fd;
167 }
168
169 // Destructors need to be defined, even if pure virtual
170 ProcessSession::~ProcessSession() {}
171
172 class LinuxProcessSession : public ProcessSession {
173 public:
174 // reference to process hosting a socket server
175 Process host;
176
177 LinuxProcessSession(LinuxProcessSession&&) = default;
178     LinuxProcessSession(Process&& host) : host(std::move(host)) {}
179     ~LinuxProcessSession() override {
180 for (auto& session : sessions) {
181 session.root = nullptr;
182 }
183
184 for (size_t sessionNum = 0; sessionNum < sessions.size(); sessionNum++) {
185 auto& info = sessions.at(sessionNum);
186 sp<RpcSession>& session = info.session;
187
188 EXPECT_NE(nullptr, session);
189 EXPECT_NE(nullptr, session->state());
190 EXPECT_EQ(0u, session->state()->countBinders()) << (session->state()->dump(), "dump:");
191
192 wp<RpcSession> weakSession = session;
193 session = nullptr;
194
195             // b/244325464 - 'getStrongCount' is printing '1' on failure here, which indicates
196             // the object should not actually be promotable. By looping, we distinguish a race here
197 // from a bug causing the object to not be promotable.
198 for (size_t i = 0; i < 3; i++) {
199 sp<RpcSession> strongSession = weakSession.promote();
200 EXPECT_EQ(nullptr, strongSession)
201 << "For session " << sessionNum << ". "
202 << (debugBacktrace(host.getPid()), debugBacktrace(getpid()),
203 "Leaked sess: ")
204 << strongSession->getStrongCount() << " checked time " << i;
205
206 if (strongSession != nullptr) {
207 sleep(1);
208 }
209 }
210 }
211 }
212
213     void setCustomExitStatusCheck(std::function<void(int wstatus)> f) override {
214 host.setCustomExitStatusCheck(std::move(f));
215 }
216
217     void terminate() override { host.terminate(); }
218 };
219
220 static unique_fd connectTo(const RpcSocketAddress& addr) {
221 unique_fd serverFd(
222 TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
223 if (!serverFd.ok()) {
224 PLOGF("Could not create socket %s", addr.toString().c_str());
225 }
226
227 if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
228 PLOGF("Could not connect to socket %s", addr.toString().c_str());
229 }
230 return serverFd;
231 }
232
233 #ifndef BINDER_RPC_TO_TRUSTY_TEST
234 static unique_fd connectToUnixBootstrap(const RpcTransportFd& transportFd) {
235 unique_fd sockClient, sockServer;
236 if (!binder::Socketpair(SOCK_STREAM, &sockClient, &sockServer)) {
237 PLOGF("Failed socketpair()");
238 }
239
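    // A single zero byte is sent as payload because the ancillary data (the fd below)
    // cannot be delivered over the socket without at least one byte of real data.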
240 int zero = 0;
241 iovec iov{&zero, sizeof(zero)};
242 std::vector<std::variant<unique_fd, borrowed_fd>> fds;
243 fds.emplace_back(std::move(sockServer));
244
245 if (binder::os::sendMessageOnSocket(transportFd, &iov, 1, &fds) < 0) {
246 PLOGF("Failed sendMessageOnSocket");
247 }
248 return sockClient;
249 }
250 #endif // BINDER_RPC_TO_TRUSTY_TEST
251
252 std::unique_ptr<RpcTransportCtxFactory> BinderRpc::newFactory(RpcSecurity rpcSecurity) {
253 return newTlsFactory(rpcSecurity);
254 }
255
256 // This creates a new process serving an interface on a certain number of
257 // threads.
258 std::unique_ptr<ProcessSession> BinderRpc::createRpcTestSocketServerProcessEtc(
259 const BinderRpcOptions& options) {
260 LOG_ALWAYS_FATAL_IF(options.numSessions < 1, "Must have at least one session to a server");
261
262 if (options.numIncomingConnectionsBySession.size() != 0) {
263 LOG_ALWAYS_FATAL_IF(options.numIncomingConnectionsBySession.size() != options.numSessions,
264 "%s: %zu != %zu", __func__,
265 options.numIncomingConnectionsBySession.size(), options.numSessions);
266 }
267
268 SocketType socketType = GetParam().type;
269 RpcSecurity rpcSecurity = GetParam().security;
270 uint32_t clientVersion = GetParam().clientVersion;
271 uint32_t serverVersion = GetParam().serverVersion;
272 bool singleThreaded = GetParam().singleThreaded;
273 bool noKernel = GetParam().noKernel;
274
275 std::string path = GetExecutableDirectory();
276 auto servicePath = path + "/binder_rpc_test_service" +
277 (singleThreaded ? "_single_threaded" : "") + (noKernel ? "_no_kernel" : "") +
278 TEST_FILE_SUFFIX;
279
280 unique_fd bootstrapClientFd, socketFd;
281
282 auto addr = allocateSocketAddress();
283 // Initializes the socket before the fork/exec.
284 if (socketType == SocketType::UNIX_RAW) {
285 socketFd = initUnixSocket(addr);
286 } else if (socketType == SocketType::UNIX_BOOTSTRAP) {
287         // Do not set O_CLOEXEC: the server end of the socketpair (socketFd) needs to
288         // survive fork/exec, because we cannot pass a ParcelFileDescriptor over a pipe.
289 if (!binder::Socketpair(SOCK_STREAM, &bootstrapClientFd, &socketFd)) {
290 PLOGF("Failed socketpair()");
291 }
292 }
293
294 auto ret = std::make_unique<LinuxProcessSession>(
295 Process([=](borrowed_fd writeEnd, borrowed_fd readEnd) {
296 if (socketType == SocketType::TIPC) {
297 // Trusty has a single persistent service
298 return;
299 }
300
301 auto writeFd = std::to_string(writeEnd.get());
302 auto readFd = std::to_string(readEnd.get());
303 auto status = execl(servicePath.c_str(), servicePath.c_str(), writeFd.c_str(),
304 readFd.c_str(), NULL);
305 PLOGF("execl('%s', _, %s, %s) should not return at all, but it returned %d",
306 servicePath.c_str(), writeFd.c_str(), readFd.c_str(), status);
307 }));
308
309 BinderRpcTestServerConfig serverConfig;
310 serverConfig.numThreads = options.numThreads;
311 serverConfig.socketType = static_cast<int32_t>(socketType);
312 serverConfig.rpcSecurity = static_cast<int32_t>(rpcSecurity);
313 serverConfig.serverVersion = serverVersion;
314 serverConfig.addr = addr;
315 serverConfig.socketFd = socketFd.get();
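    // When set, this fd is passed to the server as a raw integer and relies on being
    // inherited across fork/exec (it was intentionally created without O_CLOEXEC above).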
316 for (auto mode : options.serverSupportedFileDescriptorTransportModes) {
317 serverConfig.serverSupportedFileDescriptorTransportModes.push_back(
318 static_cast<int32_t>(mode));
319 }
320 if (socketType != SocketType::TIPC) {
321 writeToFd(ret->host.writeEnd(), serverConfig);
322 }
323
324 std::vector<sp<RpcSession>> sessions;
325 auto certVerifier = std::make_shared<RpcCertificateVerifierSimple>();
326 for (size_t i = 0; i < options.numSessions; i++) {
327 std::unique_ptr<RpcTransportCtxFactory> factory;
328 if (socketType == SocketType::TIPC) {
329 #ifdef BINDER_RPC_TO_TRUSTY_TEST
330 factory = RpcTransportCtxFactoryTipcAndroid::make();
331 #else
332 LOG_ALWAYS_FATAL("TIPC socket type only supported on vendor");
333 #endif
334 } else {
335 factory = newTlsFactory(rpcSecurity, certVerifier);
336 }
337 sessions.emplace_back(RpcSession::make(std::move(factory)));
338 }
339
340 BinderRpcTestServerInfo serverInfo;
341 if (socketType != SocketType::TIPC) {
342 serverInfo = readFromFd<BinderRpcTestServerInfo>(ret->host.readEnd());
343 BinderRpcTestClientInfo clientInfo;
344 for (const auto& session : sessions) {
345 auto& parcelableCert = clientInfo.certs.emplace_back();
346 parcelableCert.data = session->getCertificate(RpcCertificateFormat::PEM);
347 }
348 writeToFd(ret->host.writeEnd(), clientInfo);
349
350 LOG_ALWAYS_FATAL_IF(serverInfo.port > std::numeric_limits<unsigned int>::max());
351 if (socketType == SocketType::INET) {
352 LOG_ALWAYS_FATAL_IF(0 == serverInfo.port);
353 }
354
355 if (rpcSecurity == RpcSecurity::TLS) {
356 const auto& serverCert = serverInfo.cert.data;
357 LOG_ALWAYS_FATAL_IF(
358 OK !=
359 certVerifier->addTrustedPeerCertificate(RpcCertificateFormat::PEM, serverCert));
360 }
361 }
362
363 status_t status;
364
365 for (size_t i = 0; i < sessions.size(); i++) {
366 const auto& session = sessions.at(i);
367
368 size_t numIncoming = options.numIncomingConnectionsBySession.size() > 0
369 ? options.numIncomingConnectionsBySession.at(i)
370 : 0;
371
372 LOG_ALWAYS_FATAL_IF(!session->setProtocolVersion(clientVersion));
373 session->setMaxIncomingThreads(numIncoming);
374 session->setMaxOutgoingConnections(options.numOutgoingConnections);
375 session->setFileDescriptorTransportMode(options.clientFileDescriptorTransportMode);
376
377 sockaddr_storage addr{};
378 socklen_t addrLen = 0;
379
380 switch (socketType) {
381 case SocketType::PRECONNECTED: {
382 sockaddr_un addr_un{};
383 addr_un.sun_family = AF_UNIX;
384 strcpy(addr_un.sun_path, serverConfig.addr.c_str());
385 addr = *reinterpret_cast<sockaddr_storage*>(&addr_un);
386 addrLen = sizeof(sockaddr_un);
387
388 status = session->setupPreconnectedClient({}, [=]() {
389 return connectTo(UnixSocketAddress(serverConfig.addr.c_str()));
390 });
391 } break;
392 case SocketType::UNIX_RAW:
393 case SocketType::UNIX: {
394 sockaddr_un addr_un{};
395 addr_un.sun_family = AF_UNIX;
396 strcpy(addr_un.sun_path, serverConfig.addr.c_str());
397 addr = *reinterpret_cast<sockaddr_storage*>(&addr_un);
398 addrLen = sizeof(sockaddr_un);
399
400 status = session->setupUnixDomainClient(serverConfig.addr.c_str());
401 } break;
402 case SocketType::UNIX_BOOTSTRAP:
403 status = session->setupUnixDomainSocketBootstrapClient(
404 unique_fd(dup(bootstrapClientFd.get())));
405 break;
406 case SocketType::VSOCK: {
407 sockaddr_vm addr_vm{
408 .svm_family = AF_VSOCK,
409 .svm_port = static_cast<unsigned int>(serverInfo.port),
410 .svm_cid = VMADDR_CID_LOCAL,
411 };
412 addr = *reinterpret_cast<sockaddr_storage*>(&addr_vm);
413 addrLen = sizeof(sockaddr_vm);
414
415 status = session->setupVsockClient(VMADDR_CID_LOCAL, serverInfo.port);
416 } break;
417 case SocketType::INET: {
418 const std::string ip_addr = "127.0.0.1";
419 sockaddr_in addr_in{};
420 addr_in.sin_family = AF_INET;
421 addr_in.sin_port = htons(serverInfo.port);
422 inet_aton(ip_addr.c_str(), &addr_in.sin_addr);
423 addr = *reinterpret_cast<sockaddr_storage*>(&addr_in);
424 addrLen = sizeof(sockaddr_in);
425
426 status = session->setupInetClient(ip_addr.c_str(), serverInfo.port);
427 } break;
428 case SocketType::TIPC:
429 status = session->setupPreconnectedClient({}, [=]() {
430 #ifdef BINDER_RPC_TO_TRUSTY_TEST
431 auto port = trustyIpcPort(serverVersion);
432 for (size_t i = 0; i < 5; i++) {
433 // Try to connect several times,
434 // in case the service is slow to start
435 int tipcFd = tipc_connect(kTrustyIpcDevice, port.c_str());
436 if (tipcFd >= 0) {
437 return unique_fd(tipcFd);
438 }
439 usleep(50000);
440 }
441 return unique_fd();
442 #else
443 LOG_ALWAYS_FATAL("Tried to connect to Trusty outside of vendor");
444 return unique_fd();
445 #endif
446 });
447 break;
448 default:
449 LOG_ALWAYS_FATAL("Unknown socket type");
450 }
451 if (options.allowConnectFailure && status != OK) {
452 ret->sessions.clear();
453 break;
454 }
455 LOG_ALWAYS_FATAL_IF(status != OK, "Could not connect: %s", statusToString(status).c_str());
456 ret->sessions.push_back({session, session->getRootObject(), addr, addrLen});
457 }
458 return ret;
459 }
460
461 TEST_P(BinderRpc, ThreadPoolGreaterThanEqualRequested) {
462 if (clientOrServerSingleThreaded()) {
463 GTEST_SKIP() << "This test requires multiple threads";
464 }
465
466 constexpr size_t kNumThreads = 5;
467
468 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
469
470 EXPECT_OK(proc.rootIface->lock());
471
472 // block all but one thread taking locks
473 std::vector<std::thread> ts;
474 for (size_t i = 0; i < kNumThreads - 1; i++) {
475 ts.push_back(std::thread([&] { proc.rootIface->lockUnlock(); }));
476 }
477
478 usleep(100000); // give chance for calls on other threads
479
480 // other calls still work
481 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
482
483 constexpr size_t blockTimeMs = 100;
484 size_t epochMsBefore = epochMillis();
485 // after this, we should never see a response within this time
486 EXPECT_OK(proc.rootIface->unlockInMsAsync(blockTimeMs));
487
488 // this call should be blocked for blockTimeMs
489 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
490
491 size_t epochMsAfter = epochMillis();
492 EXPECT_GE(epochMsAfter, epochMsBefore + blockTimeMs) << epochMsBefore;
493
494 for (auto& t : ts) t.join();
495 }
496
497 static void testThreadPoolOverSaturated(sp<IBinderRpcTest> iface, size_t numCalls, size_t sleepMs) {
498 size_t epochMsBefore = epochMillis();
499
500 std::vector<std::thread> ts;
501 for (size_t i = 0; i < numCalls; i++) {
502 ts.push_back(std::thread([&] { iface->sleepMs(sleepMs); }));
503 }
504
505 for (auto& t : ts) t.join();
506
507 size_t epochMsAfter = epochMillis();
508
509 EXPECT_GE(epochMsAfter, epochMsBefore + 2 * sleepMs);
510
511 // b/272429574, b/365294257
512 // This flakes too much to test. Parallelization is tested
513 // in ThreadPoolGreaterThanEqualRequested and other tests.
514 // Test to make sure calls are handled in parallel.
515 // EXPECT_LE(epochMsAfter, epochMsBefore + (numCalls - 1) * sleepMs);
516 }
517
518 TEST_P(BinderRpc, ThreadPoolOverSaturated) {
519 if (clientOrServerSingleThreaded()) {
520 GTEST_SKIP() << "This test requires multiple threads";
521 }
522
523 constexpr size_t kNumThreads = 10;
524 constexpr size_t kNumCalls = kNumThreads + 3;
525 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
526
527 testThreadPoolOverSaturated(proc.rootIface, kNumCalls, 200 /*ms*/);
528 }
529
530 TEST_P(BinderRpc, ThreadPoolLimitOutgoing) {
531 if (clientOrServerSingleThreaded()) {
532 GTEST_SKIP() << "This test requires multiple threads";
533 }
534
535 constexpr size_t kNumThreads = 20;
536 constexpr size_t kNumOutgoingConnections = 10;
537 constexpr size_t kNumCalls = kNumOutgoingConnections + 3;
538 auto proc = createRpcTestSocketServerProcess(
539 {.numThreads = kNumThreads, .numOutgoingConnections = kNumOutgoingConnections});
540
541 testThreadPoolOverSaturated(proc.rootIface, kNumCalls, 200 /*ms*/);
542 }
543
544 TEST_P(BinderRpc, ThreadingStressTest) {
545 if (clientOrServerSingleThreaded()) {
546 GTEST_SKIP() << "This test requires multiple threads";
547 }
548
549 constexpr size_t kNumClientThreads = 5;
550 constexpr size_t kNumServerThreads = 5;
551 constexpr size_t kNumCalls = 50;
552
553 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumServerThreads});
554
555 std::vector<std::thread> threads;
556 for (size_t i = 0; i < kNumClientThreads; i++) {
557 threads.push_back(std::thread([&] {
558 for (size_t j = 0; j < kNumCalls; j++) {
559 sp<IBinder> out;
560 EXPECT_OK(proc.rootIface->repeatBinder(proc.rootBinder, &out));
561 EXPECT_EQ(proc.rootBinder, out);
562 }
563 }));
564 }
565
566 for (auto& t : threads) t.join();
567 }
568
569 static void saturateThreadPool(size_t threadCount, const sp<IBinderRpcTest>& iface) {
570 std::vector<std::thread> threads;
571 for (size_t i = 0; i < threadCount; i++) {
572 threads.push_back(std::thread([&] { EXPECT_OK(iface->sleepMs(500)); }));
573 }
574 for (auto& t : threads) t.join();
575 }
576
577 TEST_P(BinderRpc, OnewayStressTest) {
578 if (clientOrServerSingleThreaded()) {
579 GTEST_SKIP() << "This test requires multiple threads";
580 }
581
582 constexpr size_t kNumClientThreads = 10;
583 constexpr size_t kNumServerThreads = 10;
584 constexpr size_t kNumCalls = 1000;
585
586 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumServerThreads});
587
588 std::vector<std::thread> threads;
589 for (size_t i = 0; i < kNumClientThreads; i++) {
590 threads.push_back(std::thread([&] {
591 for (size_t j = 0; j < kNumCalls; j++) {
592 EXPECT_OK(proc.rootIface->sendString("a"));
593 }
594 }));
595 }
596
597 for (auto& t : threads) t.join();
598
599 saturateThreadPool(kNumServerThreads, proc.rootIface);
600 }
601
602 TEST_P(BinderRpc, OnewayCallQueueingWithFds) {
603 if (!supportsFdTransport()) {
604 GTEST_SKIP() << "Would fail trivially (which is tested elsewhere)";
605 }
606 if (clientOrServerSingleThreaded()) {
607 GTEST_SKIP() << "This test requires multiple threads";
608 }
609
610 constexpr size_t kNumServerThreads = 3;
611
612 // This test forces a oneway transaction to be queued by issuing two
613 // `blockingSendFdOneway` calls, then drains the queue by issuing two
614 // `blockingRecvFd` calls.
615 //
616 // For more details about the queuing semantics see
617 // https://developer.android.com/reference/android/os/IBinder#FLAG_ONEWAY
618
619 auto proc = createRpcTestSocketServerProcess({
620 .numThreads = kNumServerThreads,
621 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
622 .serverSupportedFileDescriptorTransportModes =
623 {RpcSession::FileDescriptorTransportMode::UNIX},
624 });
625
626 EXPECT_OK(proc.rootIface->blockingSendFdOneway(
627 android::os::ParcelFileDescriptor(mockFileDescriptor("a"))));
628 EXPECT_OK(proc.rootIface->blockingSendFdOneway(
629 android::os::ParcelFileDescriptor(mockFileDescriptor("b"))));
630
631 android::os::ParcelFileDescriptor fdA;
632 EXPECT_OK(proc.rootIface->blockingRecvFd(&fdA));
633 std::string result;
634 ASSERT_TRUE(ReadFdToString(fdA.get(), &result));
635 EXPECT_EQ(result, "a");
636
637 android::os::ParcelFileDescriptor fdB;
638 EXPECT_OK(proc.rootIface->blockingRecvFd(&fdB));
639 ASSERT_TRUE(ReadFdToString(fdB.get(), &result));
640 EXPECT_EQ(result, "b");
641
642 saturateThreadPool(kNumServerThreads, proc.rootIface);
643 }
644
645 TEST_P(BinderRpc, OnewayCallQueueing) {
646 if (clientOrServerSingleThreaded()) {
647 GTEST_SKIP() << "This test requires multiple threads";
648 }
649
650 constexpr size_t kNumQueued = 10;
651 constexpr size_t kNumExtraServerThreads = 4;
652
653 // make sure calls to the same object happen on the same thread
654 auto proc = createRpcTestSocketServerProcess({.numThreads = 1 + kNumExtraServerThreads});
655
656 // all these *Oneway commands should be queued on the server sequentially,
657 // even though there are multiple threads.
658 for (size_t i = 0; i + 1 < kNumQueued; i++) {
659 proc.rootIface->blockingSendIntOneway(i);
660 }
661 for (size_t i = 0; i + 1 < kNumQueued; i++) {
662 int n;
663 proc.rootIface->blockingRecvInt(&n);
664 EXPECT_EQ(n, static_cast<ssize_t>(i));
665 }
666
667 saturateThreadPool(1 + kNumExtraServerThreads, proc.rootIface);
668 }
669
670 TEST_P(BinderRpc, OnewayCallExhaustion) {
671 if (clientOrServerSingleThreaded()) {
672 GTEST_SKIP() << "This test requires multiple threads";
673 }
674
675 constexpr size_t kNumClients = 2;
676 constexpr size_t kTooLongMs = 1000;
677
678 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumClients, .numSessions = 2});
679
680 // Build up oneway calls on the second session to make sure it terminates
681 // and shuts down. The first session should be unaffected (proc destructor
682 // checks the first session).
683 auto iface = interface_cast<IBinderRpcTest>(proc.proc->sessions.at(1).root);
684
685 std::vector<std::thread> threads;
686 for (size_t i = 0; i < kNumClients; i++) {
687 // one of these threads will get stuck queueing a transaction once the
688 // socket fills up, the other will be able to fill up transactions on
689 // this object
690 threads.push_back(std::thread([&] {
691 while (iface->sleepMsAsync(kTooLongMs).isOk()) {
692 }
693 }));
694 }
695 for (auto& t : threads) t.join();
696
697 Status status = iface->sleepMsAsync(kTooLongMs);
698 EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
699
700 // now that it has died, wait for the remote session to shutdown
701 std::vector<int32_t> remoteCounts;
702 do {
703 EXPECT_OK(proc.rootIface->countBinders(&remoteCounts));
704 } while (remoteCounts.size() == kNumClients);
705
706 // the second session should be shutdown in the other process by the time we
707 // are able to join above (it'll only be hung up once it finishes processing
708 // any pending commands). We need to erase this session from the record
709 // here, so that the destructor for our session won't check that this
710 // session is valid, but we still want it to test the other session.
711 proc.proc->sessions.erase(proc.proc->sessions.begin() + 1);
712 }
713
714 TEST_P(BinderRpc, SessionWithIncomingThreadpoolDoesntLeak) {
715 if (clientOrServerSingleThreaded()) {
716 GTEST_SKIP() << "This test requires multiple threads";
717 }
718
719     // session 0 - will check for leaks in destructor of proc
720 // session 1 - we want to make sure it gets deleted when we drop all references to it
721 auto proc = createRpcTestSocketServerProcess(
722 {.numThreads = 1, .numSessions = 2, .numIncomingConnectionsBySession = {0, 1}});
723
724 wp<RpcSession> session = proc.proc->sessions.at(1).session;
725
726 // remove all references to the second session
727 proc.proc->sessions.at(1).root = nullptr;
728 proc.proc->sessions.erase(proc.proc->sessions.begin() + 1);
729
730 // TODO(b/271830568) more efficient way to wait for other incoming threadpool
731 // to drain commands.
732 for (size_t i = 0; i < 100; i++) {
733 usleep(10 * 1000);
734 if (session.promote() == nullptr) break;
735 }
736
737 EXPECT_EQ(nullptr, session.promote());
738
739 // now that it has died, wait for the remote session to shutdown
740 std::vector<int32_t> remoteCounts;
741 do {
742 EXPECT_OK(proc.rootIface->countBinders(&remoteCounts));
743 } while (remoteCounts.size() > 1);
744 }
745
746 TEST_P(BinderRpc, SingleDeathRecipient) {
747 if (clientOrServerSingleThreaded()) {
748 GTEST_SKIP() << "This test requires multiple threads";
749 }
750 class MyDeathRec : public IBinder::DeathRecipient {
751 public:
752 void binderDied(const wp<IBinder>& /* who */) override {
753 dead = true;
754 mCv.notify_one();
755 }
756 std::mutex mMtx;
757 std::condition_variable mCv;
758 bool dead = false;
759 };
760
761 // Death recipient needs to have an incoming connection to be called
762 auto proc = createRpcTestSocketServerProcess(
763 {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
764
765 auto dr = sp<MyDeathRec>::make();
766 ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
767
768 if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
769 EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
770 }
771
772 std::unique_lock<std::mutex> lock(dr->mMtx);
773 ASSERT_TRUE(dr->mCv.wait_for(lock, 100ms, [&]() { return dr->dead; }));
774
775     // need to wait for the session to shut down so we don't "Leak session".
776     // We can't do this before checking the death recipient (by calling
777     // forceShutdown earlier), because shutdownAndWait will also trigger
778     // a death recipient; if we had a way to wait for the service
779     // to shut down gracefully, we could use that here.
780 EXPECT_TRUE(proc.proc->sessions.at(0).session->shutdownAndWait(true));
781 proc.expectAlreadyShutdown = true;
782 }
783
784 TEST_P(BinderRpc, SingleDeathRecipientOnShutdown) {
785 if (clientOrServerSingleThreaded()) {
786 GTEST_SKIP() << "This test requires multiple threads";
787 }
788 class MyDeathRec : public IBinder::DeathRecipient {
789 public:
790 void binderDied(const wp<IBinder>& /* who */) override {
791 dead = true;
792 mCv.notify_one();
793 }
794 std::mutex mMtx;
795 std::condition_variable mCv;
796 bool dead = false;
797 };
798
799 // Death recipient needs to have an incoming connection to be called
800 auto proc = createRpcTestSocketServerProcess(
801 {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
802
803 auto dr = sp<MyDeathRec>::make();
804 EXPECT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
805
806 // Explicitly calling shutDownAndWait will cause the death recipients
807 // to be called.
808 EXPECT_TRUE(proc.proc->sessions.at(0).session->shutdownAndWait(true));
809
810 std::unique_lock<std::mutex> lock(dr->mMtx);
811 if (!dr->dead) {
812 EXPECT_EQ(std::cv_status::no_timeout, dr->mCv.wait_for(lock, 100ms));
813 }
814 EXPECT_TRUE(dr->dead) << "Failed to receive the death notification.";
815
816 proc.proc->terminate();
817 proc.proc->setCustomExitStatusCheck([](int wstatus) {
818 EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGTERM)
819 << "server process failed incorrectly: " << WaitStatusToString(wstatus);
820 });
821 proc.expectAlreadyShutdown = true;
822 }
823
824 TEST_P(BinderRpc, DeathRecipientFailsWithoutIncoming) {
825 if (socketType() == SocketType::TIPC) {
826 // This should work, but Trusty takes too long to restart the service
827 GTEST_SKIP() << "Service death test not supported on Trusty";
828 }
829 class MyDeathRec : public IBinder::DeathRecipient {
830 public:
831 void binderDied(const wp<IBinder>& /* who */) override {}
832 };
833
834 auto proc = createRpcTestSocketServerProcess({.numThreads = 1, .numSessions = 1});
835
836 auto dr = sp<MyDeathRec>::make();
837 EXPECT_EQ(INVALID_OPERATION, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
838 }
839
840 TEST_P(BinderRpc, UnlinkDeathRecipient) {
841 if (clientOrServerSingleThreaded()) {
842 GTEST_SKIP() << "This test requires multiple threads";
843 }
844 class MyDeathRec : public IBinder::DeathRecipient {
845 public:
846 void binderDied(const wp<IBinder>& /* who */) override {
847 GTEST_FAIL() << "This should not be called after unlinkToDeath";
848 }
849 };
850
851 // Death recipient needs to have an incoming connection to be called
852 auto proc = createRpcTestSocketServerProcess(
853 {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
854
855 auto dr = sp<MyDeathRec>::make();
856 ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
857 ASSERT_EQ(OK, proc.rootBinder->unlinkToDeath(dr, (void*)1, 0, nullptr));
858
859 proc.forceShutdown();
860 }
861
862 TEST_P(BinderRpc, Die) {
863 if (socketType() == SocketType::TIPC) {
864 // This should work, but Trusty takes too long to restart the service
865 GTEST_SKIP() << "Service death test not supported on Trusty";
866 }
867
868 for (bool doDeathCleanup : {true, false}) {
869 auto proc = createRpcTestSocketServerProcess({});
870
871 // make sure there is some state during crash
872 // 1. we hold their binder
873 sp<IBinderRpcSession> session;
874 EXPECT_OK(proc.rootIface->openSession("happy", &session));
875 // 2. they hold our binder
876 sp<IBinder> binder = new BBinder();
877 EXPECT_OK(proc.rootIface->holdBinder(binder));
878
879 EXPECT_EQ(DEAD_OBJECT, proc.rootIface->die(doDeathCleanup).transactionError())
880 << "Do death cleanup: " << doDeathCleanup;
881
882 proc.proc->setCustomExitStatusCheck([](int wstatus) {
883 EXPECT_TRUE(WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 1)
884 << "server process failed incorrectly: " << WaitStatusToString(wstatus);
885 });
886 proc.expectAlreadyShutdown = true;
887 }
888 }
889
890 TEST_P(BinderRpc, UseKernelBinderCallingId) {
891 // This test only works if the current process shared the internal state of
892 // ProcessState with the service across the call to fork(). Both the static
893 // libraries and libbinder.so have their own separate copies of all the
894 // globals, so the test only works when the test client and service both use
895 // libbinder.so (when using static libraries, even a client and service
896 // using the same kind of static library should have separate copies of the
897 // variables).
898 if (!kEnableSharedLibs || serverSingleThreaded() || noKernel()) {
899 GTEST_SKIP() << "Test disabled because Binder kernel driver was disabled "
900 "at build time.";
901 }
902
903 auto proc = createRpcTestSocketServerProcess({});
904
905 // we can't allocate IPCThreadState so actually the first time should
906 // succeed :(
907 EXPECT_OK(proc.rootIface->useKernelBinderCallingId());
908
909 // second time! we catch the error :)
910 EXPECT_EQ(DEAD_OBJECT, proc.rootIface->useKernelBinderCallingId().transactionError());
911
912 proc.proc->setCustomExitStatusCheck([](int wstatus) {
913 EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGABRT)
914 << "server process failed incorrectly: " << WaitStatusToString(wstatus);
915 });
916 proc.expectAlreadyShutdown = true;
917 }
918
919 TEST_P(BinderRpc, FileDescriptorTransportRejectNone) {
920 if (socketType() == SocketType::TIPC) {
921 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
922 }
923
924 auto proc = createRpcTestSocketServerProcess({
925 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::NONE,
926 .serverSupportedFileDescriptorTransportModes =
927 {RpcSession::FileDescriptorTransportMode::UNIX},
928 .allowConnectFailure = true,
929 });
930 EXPECT_TRUE(proc.proc->sessions.empty()) << "session connections should have failed";
931 proc.proc->terminate();
932 proc.proc->setCustomExitStatusCheck([](int wstatus) {
933 EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGTERM)
934 << "server process failed incorrectly: " << WaitStatusToString(wstatus);
935 });
936 proc.expectAlreadyShutdown = true;
937 }
938
939 TEST_P(BinderRpc, FileDescriptorTransportRejectUnix) {
940 if (socketType() == SocketType::TIPC) {
941 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
942 }
943
944 auto proc = createRpcTestSocketServerProcess({
945 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
946 .serverSupportedFileDescriptorTransportModes =
947 {RpcSession::FileDescriptorTransportMode::NONE},
948 .allowConnectFailure = true,
949 });
950 EXPECT_TRUE(proc.proc->sessions.empty()) << "session connections should have failed";
951 proc.proc->terminate();
952 proc.proc->setCustomExitStatusCheck([](int wstatus) {
953 EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGTERM)
954 << "server process failed incorrectly: " << WaitStatusToString(wstatus);
955 });
956 proc.expectAlreadyShutdown = true;
957 }
958
959 TEST_P(BinderRpc, FileDescriptorTransportOptionalUnix) {
960 if (socketType() == SocketType::TIPC) {
961 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
962 }
963
964 auto proc = createRpcTestSocketServerProcess({
965 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::NONE,
966 .serverSupportedFileDescriptorTransportModes =
967 {RpcSession::FileDescriptorTransportMode::NONE,
968 RpcSession::FileDescriptorTransportMode::UNIX},
969 });
970
971 android::os::ParcelFileDescriptor out;
972 auto status = proc.rootIface->echoAsFile("hello", &out);
973 EXPECT_EQ(status.transactionError(), FDS_NOT_ALLOWED) << status;
974 }
975
976 TEST_P(BinderRpc, ReceiveFile) {
977 if (socketType() == SocketType::TIPC) {
978 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
979 }
980
981 auto proc = createRpcTestSocketServerProcess({
982 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
983 .serverSupportedFileDescriptorTransportModes =
984 {RpcSession::FileDescriptorTransportMode::UNIX},
985 });
986
987 android::os::ParcelFileDescriptor out;
988 auto status = proc.rootIface->echoAsFile("hello", &out);
989 if (!supportsFdTransport()) {
990 EXPECT_EQ(status.transactionError(), BAD_VALUE) << status;
991 return;
992 }
993 ASSERT_TRUE(status.isOk()) << status;
994
995 std::string result;
996 ASSERT_TRUE(ReadFdToString(out.get(), &result));
997 ASSERT_EQ(result, "hello");
998 }
999
1000 TEST_P(BinderRpc, SendFiles) {
1001 if (socketType() == SocketType::TIPC) {
1002 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
1003 }
1004
1005 auto proc = createRpcTestSocketServerProcess({
1006 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
1007 .serverSupportedFileDescriptorTransportModes =
1008 {RpcSession::FileDescriptorTransportMode::UNIX},
1009 });
1010
1011 std::vector<android::os::ParcelFileDescriptor> files;
1012 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("123")));
1013 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("a")));
1014 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("b")));
1015 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("cd")));
1016
1017 android::os::ParcelFileDescriptor out;
1018 auto status = proc.rootIface->concatFiles(files, &out);
1019 if (!supportsFdTransport()) {
1020 EXPECT_EQ(status.transactionError(), BAD_VALUE) << status;
1021 return;
1022 }
1023 ASSERT_TRUE(status.isOk()) << status;
1024
1025 std::string result;
1026 EXPECT_TRUE(ReadFdToString(out.get(), &result));
1027 EXPECT_EQ(result, "123abcd");
1028 }
1029
1030 TEST_P(BinderRpc, SendMaxFiles) {
1031 if (!supportsFdTransport()) {
1032 GTEST_SKIP() << "Would fail trivially (which is tested by BinderRpc::SendFiles)";
1033 }
1034
1035 auto proc = createRpcTestSocketServerProcess({
1036 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
1037 .serverSupportedFileDescriptorTransportModes =
1038 {RpcSession::FileDescriptorTransportMode::UNIX},
1039 });
1040
1041 std::vector<android::os::ParcelFileDescriptor> files;
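    // 253 appears to match the kernel's SCM_MAX_FD limit, the most fds that can be
    // attached to a single SCM_RIGHTS control message.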
1042 for (int i = 0; i < 253; i++) {
1043 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("a")));
1044 }
1045
1046 android::os::ParcelFileDescriptor out;
1047 auto status = proc.rootIface->concatFiles(files, &out);
1048 ASSERT_TRUE(status.isOk()) << status;
1049
1050 std::string result;
1051 EXPECT_TRUE(ReadFdToString(out.get(), &result));
1052 EXPECT_EQ(result, std::string(253, 'a'));
1053 }
1054
1055 TEST_P(BinderRpc, SendTooManyFiles) {
1056 if (!supportsFdTransport()) {
1057 GTEST_SKIP() << "Would fail trivially (which is tested by BinderRpc::SendFiles)";
1058 }
1059
1060 auto proc = createRpcTestSocketServerProcess({
1061 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
1062 .serverSupportedFileDescriptorTransportModes =
1063 {RpcSession::FileDescriptorTransportMode::UNIX},
1064 });
1065
1066 std::vector<android::os::ParcelFileDescriptor> files;
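    // One more than the 253-fd SCM_RIGHTS limit exercised by SendMaxFiles, so this
    // transaction is expected to be rejected.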
1067 for (int i = 0; i < 254; i++) {
1068 files.emplace_back(android::os::ParcelFileDescriptor(mockFileDescriptor("a")));
1069 }
1070
1071 android::os::ParcelFileDescriptor out;
1072 auto status = proc.rootIface->concatFiles(files, &out);
1073 EXPECT_EQ(status.transactionError(), BAD_VALUE) << status;
1074 }
1075
1076 TEST_P(BinderRpc, AppendInvalidFd) {
1077 if (socketType() == SocketType::TIPC) {
1078 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
1079 }
1080
1081 auto proc = createRpcTestSocketServerProcess({
1082 .clientFileDescriptorTransportMode = RpcSession::FileDescriptorTransportMode::UNIX,
1083 .serverSupportedFileDescriptorTransportModes =
1084 {RpcSession::FileDescriptorTransportMode::UNIX},
1085 });
1086
1087 int badFd = fcntl(STDERR_FILENO, F_DUPFD_CLOEXEC, 0);
1088 ASSERT_NE(badFd, -1);
1089
1090 // Close the file descriptor so it becomes invalid for dup
1091 close(badFd);
1092
1093 Parcel p1;
1094 p1.markForBinder(proc.rootBinder);
1095 p1.writeInt32(3);
1096 EXPECT_EQ(OK, p1.writeFileDescriptor(badFd, false));
1097
1098 Parcel pRaw;
1099 pRaw.markForBinder(proc.rootBinder);
1100 EXPECT_EQ(OK, pRaw.appendFrom(&p1, 0, p1.dataSize()));
1101
1102 pRaw.setDataPosition(0);
1103 EXPECT_EQ(3, pRaw.readInt32());
1104 ASSERT_EQ(-1, pRaw.readFileDescriptor());
1105 }
1106
1107 #ifndef __ANDROID_VENDOR__ // No AIBinder_fromPlatformBinder on vendor
1108 TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {
1109 if constexpr (!kEnableSharedLibs) {
1110 GTEST_SKIP() << "Test disabled because Binder was built as a static library";
1111 }
1112
1113 auto proc = createRpcTestSocketServerProcess({});
1114
1115 ndk::SpAIBinder binder = ndk::SpAIBinder(AIBinder_fromPlatformBinder(proc.rootBinder));
1116 ASSERT_NE(binder, nullptr);
1117
1118 ASSERT_EQ(STATUS_OK, AIBinder_ping(binder.get()));
1119 }
1120
1121 TEST_P(BinderRpc, WorksWithLibbinderNdkUserTransaction) {
1122 if constexpr (!kEnableSharedLibs) {
1123 GTEST_SKIP() << "Test disabled because Binder was built as a static library";
1124 }
1125
1126 auto proc = createRpcTestSocketServerProcess({});
1127
1128 ndk::SpAIBinder binder = ndk::SpAIBinder(AIBinder_fromPlatformBinder(proc.rootBinder));
1129 ASSERT_NE(binder, nullptr);
1130
1131 auto ndkBinder = aidl::IBinderRpcTest::fromBinder(binder);
1132 ASSERT_NE(ndkBinder, nullptr);
1133
1134 std::string out;
1135 ndk::ScopedAStatus status = ndkBinder->doubleString("aoeu", &out);
1136 ASSERT_TRUE(status.isOk()) << status.getDescription();
1137 ASSERT_EQ("aoeuaoeu", out);
1138 }
1139 #endif // __ANDROID_VENDOR__
1140
1141 ssize_t countFds() {
1142 DIR* dir = opendir("/proc/self/fd/");
1143 if (dir == nullptr) return -1;
1144 ssize_t ret = 0;
1145 dirent* ent;
1146 while ((ent = readdir(dir)) != nullptr) ret++;
1147 closedir(dir);
1148 return ret;
1149 }
1150
1151 TEST_P(BinderRpc, Fds) {
1152 if (serverSingleThreaded()) {
1153 GTEST_SKIP() << "This test requires multiple threads";
1154 }
1155 if (socketType() == SocketType::TIPC) {
1156 GTEST_SKIP() << "File descriptor tests not supported on Trusty (yet)";
1157 }
1158
1159 ssize_t beforeFds = countFds();
1160 ASSERT_GE(beforeFds, 0);
1161 {
1162 auto proc = createRpcTestSocketServerProcess({.numThreads = 10});
1163 ASSERT_EQ(OK, proc.rootBinder->pingBinder());
1164 }
1165 ASSERT_EQ(beforeFds, countFds()) << (system("ls -l /proc/self/fd/"), "fd leak?");
1166 }
1167
1168 // TODO need to add IServiceManager.cpp/.h to libbinder_no_kernel
1169 #ifdef BINDER_WITH_KERNEL_IPC
1170
1171 class BinderRpcAccessor : public BinderRpc {
1172     void SetUp() override {
1173 if (serverSingleThreaded()) {
1174 // This blocks on android::FdTrigger::triggerablePoll when attempting to set
1175 // up the client RpcSession
1176 GTEST_SKIP() << "Accessors are not supported for single threaded libbinder";
1177 }
1178 if (rpcSecurity() == RpcSecurity::TLS) {
1179 GTEST_SKIP() << "Accessors are not supported with TLS";
1180 // ... for now
1181 }
1182
1183 if (socketType() == SocketType::UNIX_BOOTSTRAP) {
1184 GTEST_SKIP() << "Accessors do not support UNIX_BOOTSTRAP because no connection "
1185 "information is known";
1186 }
1187 if (socketType() == SocketType::TIPC) {
1188 GTEST_SKIP() << "Accessors do not support TIPC because the socket transport is not "
1189 "known in libbinder";
1190 }
1191 BinderRpc::SetUp();
1192 }
1193 };
1194
1195 inline void waitForExtraSessionCleanup(const BinderRpcTestProcessSession& proc) {
1196 // Need to give the server some time to delete its RpcSession after our last
1197 // reference is dropped, closing the connection. Check for up to 1 second,
1198 // every 10 ms.
1199 for (size_t i = 0; i < 100; i++) {
1200 std::vector<int32_t> remoteCounts;
1201 EXPECT_OK(proc.rootIface->countBinders(&remoteCounts));
1202         // We expect the original binder to still be alive; we just want to wait
1203 // for this extra session to be cleaned up.
1204 if (remoteCounts.size() == proc.proc->sessions.size()) break;
1205 usleep(10000);
1206 }
1207 }
1208
1209 TEST_P(BinderRpcAccessor, InjectAndGetServiceHappyPath) {
1210 constexpr size_t kNumThreads = 10;
1211 const String16 kInstanceName("super.cool.service/better_than_default");
1212
1213 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
1214 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
1215
1216 auto receipt = addAccessorProvider(
1217 {String8(kInstanceName).c_str()}, [&](const String16& name) -> sp<IBinder> {
1218 return createAccessor(name,
1219 [&](const String16& name, sockaddr* outAddr,
1220 socklen_t addrSize) -> status_t {
1221 if (outAddr == nullptr ||
1222 addrSize < proc.proc->sessions[0].addrLen) {
1223 return BAD_VALUE;
1224 }
1225 if (name == kInstanceName) {
1226 if (proc.proc->sessions[0].addr.ss_family ==
1227 AF_UNIX) {
1228 sockaddr_un* un = reinterpret_cast<sockaddr_un*>(
1229 &proc.proc->sessions[0].addr);
1230 ALOGE("inside callback: %s", un->sun_path);
1231 }
1232 std::memcpy(outAddr, &proc.proc->sessions[0].addr,
1233 proc.proc->sessions[0].addrLen);
1234 return OK;
1235 }
1236 return NAME_NOT_FOUND;
1237 });
1238 });
1239
1240 EXPECT_FALSE(receipt.expired());
1241
1242 sp<IBinder> binder = defaultServiceManager()->checkService(kInstanceName);
1243 sp<IBinderRpcTest> service = checked_interface_cast<IBinderRpcTest>(binder);
1244 EXPECT_NE(service, nullptr);
1245
1246 sp<IBinder> out;
1247 EXPECT_OK(service->repeatBinder(binder, &out));
1248 EXPECT_EQ(binder, out);
1249
1250 out.clear();
1251 binder.clear();
1252 service.clear();
1253
1254 status_t status = removeAccessorProvider(receipt);
1255 EXPECT_EQ(status, OK);
1256
1257 waitForExtraSessionCleanup(proc);
1258 }
1259
1260 TEST_P(BinderRpcAccessor, InjectNoAccessorProvided) {
1261 const String16 kInstanceName("doesnt_matter_nothing_checks");
1262
1263 bool isProviderDeleted = false;
1264
1265 auto receipt = addAccessorProvider({String8(kInstanceName).c_str()},
1266 [&](const String16&) -> sp<IBinder> { return nullptr; });
1267 EXPECT_FALSE(receipt.expired());
1268
1269 sp<IBinder> binder = defaultServiceManager()->checkService(kInstanceName);
1270 EXPECT_EQ(binder, nullptr);
1271
1272 status_t status = removeAccessorProvider(receipt);
1273 EXPECT_EQ(status, OK);
1274 }
1275
1276 TEST_P(BinderRpcAccessor, InjectDuplicateAccessorProvider) {
1277 const String16 kInstanceName("super.cool.service/better_than_default");
1278 const String16 kInstanceName2("super.cool.service/better_than_default2");
1279
1280 auto receipt =
1281 addAccessorProvider({String8(kInstanceName).c_str(), String8(kInstanceName2).c_str()},
1282 [&](const String16&) -> sp<IBinder> { return nullptr; });
1283 EXPECT_FALSE(receipt.expired());
1284 // reject this because it's associated with an already used instance name
1285 auto receipt2 = addAccessorProvider({String8(kInstanceName).c_str()},
1286 [&](const String16&) -> sp<IBinder> { return nullptr; });
1287 EXPECT_TRUE(receipt2.expired());
1288
1289 // the first provider should still be usable
1290 sp<IBinder> binder = defaultServiceManager()->checkService(kInstanceName);
1291 EXPECT_EQ(binder, nullptr);
1292
1293 status_t status = removeAccessorProvider(receipt);
1294 EXPECT_EQ(status, OK);
1295 }
1296
1297 TEST_P(BinderRpcAccessor, InjectAccessorProviderNoInstance) {
1298 auto receipt = addAccessorProvider({}, [&](const String16&) -> sp<IBinder> { return nullptr; });
1299 EXPECT_TRUE(receipt.expired());
1300 }
1301
1302 TEST_P(BinderRpcAccessor, InjectNoSockaddrProvided) {
1303 constexpr size_t kNumThreads = 10;
1304 const String16 kInstanceName("super.cool.service/better_than_default");
1305
1306 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
1307 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
1308
1309 bool isProviderDeleted = false;
1310 bool isAccessorDeleted = false;
1311
1312 auto receipt = addAccessorProvider({String8(kInstanceName).c_str()},
1313 [&](const String16& name) -> sp<IBinder> {
1314 return createAccessor(name,
1315 [&](const String16&, sockaddr*,
1316 socklen_t) -> status_t {
1317 // don't fill in outAddr
1318 return NAME_NOT_FOUND;
1319 });
1320 });
1321
1322 EXPECT_FALSE(receipt.expired());
1323
1324 sp<IBinder> binder = defaultServiceManager()->checkService(kInstanceName);
1325 EXPECT_EQ(binder, nullptr);
1326
1327 status_t status = removeAccessorProvider(receipt);
1328 EXPECT_EQ(status, OK);
1329 }
1330
1331 constexpr const char* kARpcInstance = "some.instance.name.IFoo/default";
1332 const char* kARpcSupportedServices[] = {
1333 kARpcInstance,
1334 };
1335 const uint32_t kARpcNumSupportedServices = 1;
1336
1337 struct ConnectionInfoData {
1338 sockaddr_storage addr;
1339 socklen_t len;
1340 bool* isDeleted;
1341     ~ConnectionInfoData() {
1342 if (isDeleted) *isDeleted = true;
1343 }
1344 };
1345
1346 struct AccessorProviderData {
1347 sockaddr_storage addr;
1348 socklen_t len;
1349 bool* isDeleted;
1350     ~AccessorProviderData() {
1351 if (isDeleted) *isDeleted = true;
1352 }
1353 };
1354
1355 void accessorProviderDataOnDelete(void* data) {
1356 delete reinterpret_cast<AccessorProviderData*>(data);
1357 }
1358 void infoProviderDataOnDelete(void* data) {
1359 delete reinterpret_cast<ConnectionInfoData*>(data);
1360 }
1361
1362 ABinderRpc_ConnectionInfo* infoProvider(const char* instance, void* cookie) {
1363 if (instance == nullptr || cookie == nullptr) return nullptr;
1364 ConnectionInfoData* data = reinterpret_cast<ConnectionInfoData*>(cookie);
1365 return ABinderRpc_ConnectionInfo_new(reinterpret_cast<const sockaddr*>(&data->addr), data->len);
1366 }
1367
1368 ABinderRpc_Accessor* getAccessor(const char* instance, void* cookie) {
1369 if (instance == nullptr || cookie == nullptr) return nullptr;
1370 if (0 != strcmp(instance, kARpcInstance)) return nullptr;
1371
1372 AccessorProviderData* data = reinterpret_cast<AccessorProviderData*>(cookie);
1373
1374 ConnectionInfoData* info = new ConnectionInfoData{
1375 .addr = data->addr,
1376 .len = data->len,
1377 .isDeleted = nullptr,
1378 };
1379
1380 return ABinderRpc_Accessor_new(instance, infoProvider, info, infoProviderDataOnDelete);
1381 }
1382
1383 class BinderARpcNdk : public ::testing::Test {};
1384
1385 TEST_F(BinderARpcNdk, ARpcProviderNewDelete) {
1386 bool isDeleted = false;
1387
1388 AccessorProviderData* data = new AccessorProviderData{{}, 0, &isDeleted};
1389
1390 ABinderRpc_AccessorProvider* provider =
1391 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices,
1392 kARpcNumSupportedServices, data,
1393 accessorProviderDataOnDelete);
1394
1395 ASSERT_NE(provider, nullptr);
1396 EXPECT_FALSE(isDeleted);
1397
1398 ABinderRpc_unregisterAccessorProvider(provider);
1399
1400 EXPECT_TRUE(isDeleted);
1401 }
1402
1403 TEST_F(BinderARpcNdk, ARpcProviderDeleteOnError) {
1404 bool isDeleted = false;
1405 AccessorProviderData* data = new AccessorProviderData{{}, 0, &isDeleted};
1406
1407 ABinderRpc_AccessorProvider* provider =
1408 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices, 0, data,
1409 accessorProviderDataOnDelete);
1410
1411 ASSERT_EQ(provider, nullptr);
1412 EXPECT_TRUE(isDeleted);
1413 }
1414
1415 TEST_F(BinderARpcNdk, ARpcProvideOnErrorNoDeleteCbNoCrash) {
1416 ABinderRpc_AccessorProvider* provider =
1417 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices, 0, nullptr,
1418 nullptr);
1419
1420 ASSERT_EQ(provider, nullptr);
1421 }
1422
1423 TEST_F(BinderARpcNdk, ARpcProviderDuplicateInstance) {
1424 const char* instance = "some.instance.name.IFoo/default";
1425 const uint32_t numInstances = 2;
1426 const char* instances[numInstances] = {
1427 instance,
1428 "some.other.instance/default",
1429 };
1430
1431 bool isDeleted = false;
1432
1433 AccessorProviderData* data = new AccessorProviderData{{}, 0, &isDeleted};
1434
1435 ABinderRpc_AccessorProvider* provider =
1436 ABinderRpc_registerAccessorProvider(getAccessor, instances, numInstances, data,
1437 accessorProviderDataOnDelete);
1438
1439 ASSERT_NE(provider, nullptr);
1440 EXPECT_FALSE(isDeleted);
1441
1442 const uint32_t numInstances2 = 1;
1443 const char* instances2[numInstances2] = {
1444 instance,
1445 };
1446 bool isDeleted2 = false;
1447 AccessorProviderData* data2 = new AccessorProviderData{{}, 0, &isDeleted2};
1448 ABinderRpc_AccessorProvider* provider2 =
1449 ABinderRpc_registerAccessorProvider(getAccessor, instances2, numInstances2, data2,
1450 accessorProviderDataOnDelete);
1451
1452 EXPECT_EQ(provider2, nullptr);
1453 // If it fails to be registered, the data is still cleaned up with
1454 // accessorProviderDataOnDelete
1455 EXPECT_TRUE(isDeleted2);
1456
1457 ABinderRpc_unregisterAccessorProvider(provider);
1458
1459 EXPECT_TRUE(isDeleted);
1460 }
1461
1462 TEST_F(BinderARpcNdk, ARpcProviderRegisterNoInstance) {
1463 const uint32_t numInstances = 0;
1464 const char* instances[numInstances] = {};
1465
1466 bool isDeleted = false;
1467 AccessorProviderData* data = new AccessorProviderData{{}, 0, &isDeleted};
1468
1469 ABinderRpc_AccessorProvider* provider =
1470 ABinderRpc_registerAccessorProvider(getAccessor, instances, numInstances, data,
1471 accessorProviderDataOnDelete);
1472 ASSERT_EQ(provider, nullptr);
1473 }
1474
1475 TEST_F(BinderARpcNdk, ARpcAccessorNewDelete) {
1476 bool isDeleted = false;
1477
1478 ConnectionInfoData* data = new ConnectionInfoData{{}, 0, &isDeleted};
1479
1480 ABinderRpc_Accessor* accessor =
1481 ABinderRpc_Accessor_new("gshoe_service", infoProvider, data, infoProviderDataOnDelete);
1482 ASSERT_NE(accessor, nullptr);
1483 EXPECT_FALSE(isDeleted);
1484
1485 ABinderRpc_Accessor_delete(accessor);
1486 EXPECT_TRUE(isDeleted);
1487 }
1488
1489 TEST_F(BinderARpcNdk, ARpcConnectionInfoNewDelete) {
1490 sockaddr_vm addr{
1491 .svm_family = AF_VSOCK,
1492 .svm_port = VMADDR_PORT_ANY,
1493 .svm_cid = VMADDR_CID_ANY,
1494 };
1495
1496 ABinderRpc_ConnectionInfo* info =
1497 ABinderRpc_ConnectionInfo_new(reinterpret_cast<sockaddr*>(&addr), sizeof(sockaddr_vm));
1498 EXPECT_NE(info, nullptr);
1499
1500 ABinderRpc_ConnectionInfo_delete(info);
1501 }
1502
1503 TEST_F(BinderARpcNdk, ARpcAsFromBinderAsBinder) {
1504 bool isDeleted = false;
1505
1506 ConnectionInfoData* data = new ConnectionInfoData{{}, 0, &isDeleted};
1507
1508 ABinderRpc_Accessor* accessor =
1509 ABinderRpc_Accessor_new("gshoe_service", infoProvider, data, infoProviderDataOnDelete);
1510 ASSERT_NE(accessor, nullptr);
1511 EXPECT_FALSE(isDeleted);
1512
1513 {
1514 ndk::SpAIBinder binder = ndk::SpAIBinder(ABinderRpc_Accessor_asBinder(accessor));
1515 EXPECT_NE(binder.get(), nullptr);
1516
1517 ABinderRpc_Accessor* accessor2 =
1518 ABinderRpc_Accessor_fromBinder("wrong_service_name", binder.get());
1519 // The API checks for the expected service name that is associated with
1520 // the accessor!
1521 EXPECT_EQ(accessor2, nullptr);
1522
1523 accessor2 = ABinderRpc_Accessor_fromBinder("gshoe_service", binder.get());
1524 EXPECT_NE(accessor2, nullptr);
1525
1526 // this is a new ABinderRpc_Accessor object that wraps the underlying
1527 // libbinder object.
1528 EXPECT_NE(accessor, accessor2);
1529
1530 ndk::SpAIBinder binder2 = ndk::SpAIBinder(ABinderRpc_Accessor_asBinder(accessor2));
1531 EXPECT_EQ(binder.get(), binder2.get());
1532
1533 ABinderRpc_Accessor_delete(accessor2);
1534 }
1535
1536 EXPECT_FALSE(isDeleted);
1537 ABinderRpc_Accessor_delete(accessor);
1538 EXPECT_TRUE(isDeleted);
1539 }
1540
1541 TEST_F(BinderARpcNdk, ARpcRequireProviderOnDeleteCallback) {
1542 EXPECT_EQ(nullptr,
1543 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices,
1544 kARpcNumSupportedServices,
1545 reinterpret_cast<void*>(1), nullptr));
1546 }
1547
1548 TEST_F(BinderARpcNdk, ARpcRequireInfoOnDeleteCallback) {
1549 EXPECT_EQ(nullptr,
1550 ABinderRpc_Accessor_new("the_best_service_name", infoProvider,
1551 reinterpret_cast<void*>(1), nullptr));
1552 }
1553
1554 TEST_F(BinderARpcNdk, ARpcNoDataNoProviderOnDeleteCallback) {
1555 ABinderRpc_AccessorProvider* provider =
1556 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices,
1557 kARpcNumSupportedServices, nullptr, nullptr);
1558 ASSERT_NE(nullptr, provider);
1559 ABinderRpc_unregisterAccessorProvider(provider);
1560 }
1561
1562 TEST_F(BinderARpcNdk, ARpcNoDataNoInfoOnDeleteCallback) {
1563 ABinderRpc_Accessor* accessor =
1564 ABinderRpc_Accessor_new("the_best_service_name", infoProvider, nullptr, nullptr);
1565 ASSERT_NE(nullptr, accessor);
1566 ABinderRpc_Accessor_delete(accessor);
1567 }
1568
1569 TEST_F(BinderARpcNdk, ARpcNullArgs_ConnectionInfo_new) {
1570 sockaddr_storage addr;
1571 EXPECT_EQ(nullptr, ABinderRpc_ConnectionInfo_new(reinterpret_cast<const sockaddr*>(&addr), 0));
1572 }
1573
1574 TEST_F(BinderARpcNdk, ARpcDelegateAccessorWrongInstance) {
1575 AccessorProviderData* data = new AccessorProviderData();
1576 ABinderRpc_Accessor* accessor = getAccessor(kARpcInstance, data);
1577 ASSERT_NE(accessor, nullptr);
1578 AIBinder* localAccessorBinder = ABinderRpc_Accessor_asBinder(accessor);
1579 EXPECT_NE(localAccessorBinder, nullptr);
1580
1581 AIBinder* delegatorBinder = nullptr;
1582 binder_status_t status =
1583 ABinderRpc_Accessor_delegateAccessor("bar", localAccessorBinder, &delegatorBinder);
1584 EXPECT_EQ(status, NAME_NOT_FOUND);
1585
1586 AIBinder_decStrong(localAccessorBinder);
1587 ABinderRpc_Accessor_delete(accessor);
1588 delete data;
1589 }
1590
1591 TEST_F(BinderARpcNdk, ARpcDelegateNonAccessor) {
1592 auto service = defaultServiceManager()->checkService(String16(kKnownAidlService));
1593 ASSERT_NE(nullptr, service);
1594 ndk::SpAIBinder binder = ndk::SpAIBinder(AIBinder_fromPlatformBinder(service));
1595
1596 AIBinder* delegatorBinder = nullptr;
1597 binder_status_t status =
1598 ABinderRpc_Accessor_delegateAccessor("bar", binder.get(), &delegatorBinder);
1599
1600 EXPECT_EQ(status, BAD_TYPE);
1601 }
1602
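// Helper shared by the accessor tests below: registers an accessor provider pointing at the test
// server's socket address, resolves one of the supported instances through
// AServiceManager_checkService, pings it, then unregisters and verifies the provider's onDelete
// callback ran.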
1603 inline void getServiceTest(BinderRpcTestProcessSession& proc,
1604 ABinderRpc_AccessorProvider_getAccessorCallback getAccessor) {
1606 bool isDeleted = false;
1607
1608 AccessorProviderData* data =
1609 new AccessorProviderData{proc.proc->sessions[0].addr, proc.proc->sessions[0].addrLen,
1610 &isDeleted};
1611 ABinderRpc_AccessorProvider* provider =
1612 ABinderRpc_registerAccessorProvider(getAccessor, kARpcSupportedServices,
1613 kARpcNumSupportedServices, data,
1614 accessorProviderDataOnDelete);
1615 EXPECT_NE(provider, nullptr);
1616 EXPECT_FALSE(isDeleted);
1617
1618 {
1619 ndk::SpAIBinder binder = ndk::SpAIBinder(AServiceManager_checkService(kARpcInstance));
1620 ASSERT_NE(binder.get(), nullptr);
1621 EXPECT_EQ(STATUS_OK, AIBinder_ping(binder.get()));
1622 }
1623
1624 ABinderRpc_unregisterAccessorProvider(provider);
1625 EXPECT_TRUE(isDeleted);
1626
1627 waitForExtraSessionCleanup(proc);
1628 }
1629
1630 TEST_P(BinderRpcAccessor, ARpcGetService) {
1631 constexpr size_t kNumThreads = 10;
1632 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
1633 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
1634
1635 getServiceTest(proc, getAccessor);
1636 }
1637
1638 // Create accessors and wrap each of the accessors in a delegator
1639 ABinderRpc_Accessor* getDelegatedAccessor(const char* instance, void* cookie) {
1640 ABinderRpc_Accessor* accessor = getAccessor(instance, cookie);
1641 AIBinder* accessorBinder = ABinderRpc_Accessor_asBinder(accessor);
1642 // Once we have a handle to the AIBinder which holds a reference to the
1643 // underlying accessor IBinder, we can get rid of the ABinderRpc_Accessor
1644 ABinderRpc_Accessor_delete(accessor);
1645
1646 AIBinder* delegatorBinder = nullptr;
1647 binder_status_t status =
1648 ABinderRpc_Accessor_delegateAccessor(instance, accessorBinder, &delegatorBinder);
1649 // No longer need this AIBinder. The delegator has a reference to the
1650 // underlying IBinder on success, and on failure we are done here.
1651 AIBinder_decStrong(accessorBinder);
1652 if (status != OK || delegatorBinder == nullptr) {
1653 ALOGE("Unexpected behavior. Status: %s, delegator ptr: %p", statusToString(status).c_str(),
1654 delegatorBinder);
1655 return nullptr;
1656 }
1657
1658 return ABinderRpc_Accessor_fromBinder(instance, delegatorBinder);
1659 }
1660
1661 TEST_P(BinderRpcAccessor, ARpcGetServiceWithDelegator) {
1662 constexpr size_t kNumThreads = 10;
1663 auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
1664 EXPECT_EQ(OK, proc.rootBinder->pingBinder());
1665
1666 getServiceTest(proc, getDelegatedAccessor);
1667 }
1668
1669 #endif // BINDER_WITH_KERNEL_IPC
1670
1671 #ifdef BINDER_RPC_TO_TRUSTY_TEST
1672
1673 static std::vector<BinderRpc::ParamType> getTrustyBinderRpcParams() {
1674 std::vector<BinderRpc::ParamType> ret;
1675
1676 for (const auto& clientVersion : testVersions()) {
1677 for (const auto& serverVersion : testVersions()) {
1678 ret.push_back(BinderRpc::ParamType{
1679 .type = SocketType::TIPC,
1680 .security = RpcSecurity::RAW,
1681 .clientVersion = clientVersion,
1682 .serverVersion = serverVersion,
1683 .singleThreaded = true,
1684 .noKernel = true,
1685 });
1686 }
1687 }
1688
1689 return ret;
1690 }
1691
1692 INSTANTIATE_TEST_SUITE_P(Trusty, BinderRpc, ::testing::ValuesIn(getTrustyBinderRpcParams()),
1693 BinderRpc::PrintParamInfo);
1694 #else // BINDER_RPC_TO_TRUSTY_TEST
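// Probe whether the kernel supports vsock loopback: bind an AF_VSOCK listener on VMADDR_PORT_ANY
// and attempt a non-blocking connect to VMADDR_CID_LOCAL, polling both ends to observe the result.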
1695 bool testSupportVsockLoopback() {
1696 // We don't need to enable TLS to know if vsock is supported.
1697 unique_fd serverFd(
1698 TEMP_FAILURE_RETRY(socket(AF_VSOCK, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
1699
1700     if (!serverFd.ok() && errno == EAFNOSUPPORT) {
1701 return false;
1702 }
1703
1704 LOG_ALWAYS_FATAL_IF(!serverFd.ok(), "Could not create socket: %s", strerror(errno));
1705
1706 sockaddr_vm serverAddr{
1707 .svm_family = AF_VSOCK,
1708 .svm_port = VMADDR_PORT_ANY,
1709 .svm_cid = VMADDR_CID_ANY,
1710 };
1711 int ret = TEMP_FAILURE_RETRY(
1712 bind(serverFd.get(), reinterpret_cast<sockaddr*>(&serverAddr), sizeof(serverAddr)));
1713 LOG_ALWAYS_FATAL_IF(0 != ret, "Could not bind socket to port VMADDR_PORT_ANY: %s",
1714 strerror(errno));
1715
1716 socklen_t len = sizeof(serverAddr);
1717 ret = getsockname(serverFd.get(), reinterpret_cast<sockaddr*>(&serverAddr), &len);
1718 LOG_ALWAYS_FATAL_IF(0 != ret, "Failed to getsockname: %s", strerror(errno));
1719 LOG_ALWAYS_FATAL_IF(len < static_cast<socklen_t>(sizeof(serverAddr)),
1720 "getsockname didn't read the full addr struct");
1721
1722 ret = TEMP_FAILURE_RETRY(listen(serverFd.get(), 1 /*backlog*/));
1723 LOG_ALWAYS_FATAL_IF(0 != ret, "Could not listen socket on port %u: %s", serverAddr.svm_port,
1724 strerror(errno));
1725
1726     // Try to connect to the server using the VMADDR_CID_LOCAL cid
1727     // to see if the kernel supports it. The socket is non-blocking, so the
1728     // result of the connect is collected below by polling for POLLOUT and
1729     // reading SO_ERROR once the connect either completes or times out.
1730 unique_fd connectFd(
1731 TEMP_FAILURE_RETRY(socket(AF_VSOCK, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
1732 LOG_ALWAYS_FATAL_IF(!connectFd.ok(), "Could not create socket for port %u: %s",
1733 serverAddr.svm_port, strerror(errno));
1734
1735 bool success = false;
1736 sockaddr_vm connectAddr{
1737 .svm_family = AF_VSOCK,
1738 .svm_port = serverAddr.svm_port,
1739 .svm_cid = VMADDR_CID_LOCAL,
1740 };
1741 ret = TEMP_FAILURE_RETRY(connect(connectFd.get(), reinterpret_cast<sockaddr*>(&connectAddr),
1742 sizeof(connectAddr)));
1743 if (ret != 0 && (errno == EAGAIN || errno == EINPROGRESS)) {
1744 unique_fd acceptFd;
1745 while (true) {
1746 pollfd pfd[]{
1747 {.fd = serverFd.get(), .events = POLLIN, .revents = 0},
1748 {.fd = connectFd.get(), .events = POLLOUT, .revents = 0},
1749 };
1750 ret = TEMP_FAILURE_RETRY(poll(pfd, countof(pfd), -1));
1751 LOG_ALWAYS_FATAL_IF(ret < 0, "Error polling: %s", strerror(errno));
1752
1753 if (pfd[0].revents & POLLIN) {
1754 sockaddr_vm acceptAddr;
1755 socklen_t acceptAddrLen = sizeof(acceptAddr);
1756 ret = TEMP_FAILURE_RETRY(accept4(serverFd.get(),
1757 reinterpret_cast<sockaddr*>(&acceptAddr),
1758 &acceptAddrLen, SOCK_CLOEXEC));
1759 LOG_ALWAYS_FATAL_IF(ret < 0, "Could not accept4 socket: %s", strerror(errno));
1760 LOG_ALWAYS_FATAL_IF(acceptAddrLen != static_cast<socklen_t>(sizeof(acceptAddr)),
1761 "Truncated address");
1762
1763 // Store the fd in acceptFd so we keep the connection alive
1764 // while polling connectFd
1765 acceptFd.reset(ret);
1766 }
1767
1768 if (pfd[1].revents & POLLOUT) {
1769 // Connect either succeeded or timed out
1770 int connectErrno;
1771 socklen_t connectErrnoLen = sizeof(connectErrno);
1772 int ret = getsockopt(connectFd.get(), SOL_SOCKET, SO_ERROR, &connectErrno,
1773 &connectErrnoLen);
1774 LOG_ALWAYS_FATAL_IF(ret == -1,
1775 "Could not getsockopt() after connect() "
1776 "on non-blocking socket: %s.",
1777 strerror(errno));
1778
1779 // We're done, this is all we wanted
1780 success = connectErrno == 0;
1781 break;
1782 }
1783 }
1784 } else {
1785 success = ret == 0;
1786 }
1787
1788 ALOGE("Detected vsock loopback supported: %s", success ? "yes" : "no");
1789
1790 return success;
1791 }
1792
1793 static std::vector<SocketType> testSocketTypes(bool hasPreconnected = true) {
1794 std::vector<SocketType> ret = {SocketType::UNIX, SocketType::UNIX_BOOTSTRAP, SocketType::INET,
1795 SocketType::UNIX_RAW};
1796
1797 if (hasPreconnected) ret.push_back(SocketType::PRECONNECTED);
1798
1799 #ifdef __BIONIC__
1800 // Devices may not have vsock support. AVF tests will verify whether they do, but
1801 // we can't require it due to old kernels for the time being.
1802 static bool hasVsockLoopback = testSupportVsockLoopback();
1803 #else
1804     // On host machines, we always assume we have vsock loopback. If we don't, the
1805     // subsequent test failures will be clearer than failing here.
1806 static bool hasVsockLoopback = true;
1807 #endif
1808
1809 if (hasVsockLoopback) {
1810 ret.push_back(SocketType::VSOCK);
1811 }
1812
1813 return ret;
1814 }
1815
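// Build the parameter matrix for the BinderRpc suite. To keep the number of test instances
// manageable, the full cross product of security/version/threading options is only generated for
// UNIX sockets (or for everything when `full` is flipped to true); every other socket type gets a
// single default configuration.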
1816 static std::vector<BinderRpc::ParamType> getBinderRpcParams() {
1817 std::vector<BinderRpc::ParamType> ret;
1818
1819 constexpr bool full = false;
1820
1821 for (const auto& type : testSocketTypes()) {
1822 if (full || type == SocketType::UNIX) {
1823 for (const auto& security : RpcSecurityValues()) {
1824 for (const auto& clientVersion : testVersions()) {
1825 for (const auto& serverVersion : testVersions()) {
1826 for (bool singleThreaded : {false, true}) {
1827 for (bool noKernel : noKernelValues()) {
1828 ret.push_back(BinderRpc::ParamType{
1829 .type = type,
1830 .security = security,
1831 .clientVersion = clientVersion,
1832 .serverVersion = serverVersion,
1833 .singleThreaded = singleThreaded,
1834 .noKernel = noKernel,
1835 });
1836 }
1837 }
1838 }
1839 }
1840 }
1841 } else {
1842 ret.push_back(BinderRpc::ParamType{
1843 .type = type,
1844 .security = RpcSecurity::RAW,
1845 .clientVersion = RPC_WIRE_PROTOCOL_VERSION,
1846 .serverVersion = RPC_WIRE_PROTOCOL_VERSION,
1847 .singleThreaded = false,
1848 .noKernel = !kEnableKernelIpcTesting,
1849 });
1850 }
1851 }
1852
1853 return ret;
1854 }
1855
1856 INSTANTIATE_TEST_SUITE_P(PerSocket, BinderRpc, ::testing::ValuesIn(getBinderRpcParams()),
1857 BinderRpc::PrintParamInfo);
1858
1859 #ifdef BINDER_WITH_KERNEL_IPC
1860 INSTANTIATE_TEST_SUITE_P(PerSocket, BinderRpcAccessor, ::testing::ValuesIn(getBinderRpcParams()),
1861 BinderRpc::PrintParamInfo);
1862 #endif // BINDER_WITH_KERNEL_IPC
1863
1864 class BinderRpcServerRootObject
1865 : public ::testing::TestWithParam<std::tuple<bool, bool, RpcSecurity>> {};
1866
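// Setting a root object weakly should not extend its lifetime: once the last strong reference is
// dropped, getRootObject() should return null, whereas a strongly-held root object survives.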
1867 TEST_P(BinderRpcServerRootObject, WeakRootObject) {
1868 using SetFn = std::function<void(RpcServer*, sp<IBinder>)>;
1869 auto setRootObject = [](bool isStrong) -> SetFn {
1870 return isStrong ? SetFn(&RpcServer::setRootObject) : SetFn(&RpcServer::setRootObjectWeak);
1871 };
1872
1873 auto [isStrong1, isStrong2, rpcSecurity] = GetParam();
1874 auto server = RpcServer::make(newTlsFactory(rpcSecurity));
1875 auto binder1 = sp<BBinder>::make();
1876 IBinder* binderRaw1 = binder1.get();
1877 setRootObject(isStrong1)(server.get(), binder1);
1878 EXPECT_EQ(binderRaw1, server->getRootObject());
1879 binder1.clear();
1880 EXPECT_EQ((isStrong1 ? binderRaw1 : nullptr), server->getRootObject());
1881
1882 auto binder2 = sp<BBinder>::make();
1883 IBinder* binderRaw2 = binder2.get();
1884 setRootObject(isStrong2)(server.get(), binder2);
1885 EXPECT_EQ(binderRaw2, server->getRootObject());
1886 binder2.clear();
1887 EXPECT_EQ((isStrong2 ? binderRaw2 : nullptr), server->getRootObject());
1888 }
1889
1890 INSTANTIATE_TEST_SUITE_P(BinderRpc, BinderRpcServerRootObject,
1891 ::testing::Combine(::testing::Bool(), ::testing::Bool(),
1892 ::testing::ValuesIn(RpcSecurityValues())));
1893
1894 class OneOffSignal {
1895 public:
1896 // If notify() was previously called, or is called within |duration|, return true; else false.
1897 template <typename R, typename P>
1898     bool wait(std::chrono::duration<R, P> duration) {
1899 std::unique_lock<std::mutex> lock(mMutex);
1900 return mCv.wait_for(lock, duration, [this] { return mValue; });
1901 }
1902     void notify() {
1903 std::unique_lock<std::mutex> lock(mMutex);
1904 mValue = true;
1905 lock.unlock();
1906 mCv.notify_all();
1907 }
1908
1909 private:
1910 std::mutex mMutex;
1911 std::condition_variable mCv;
1912 bool mValue = false;
1913 };
1914
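// End-to-end check against a Java service: ask a kernel-binder service (activity) to host an RPC
// server via setRpcClientDebug(), then connect to it over an inet RpcSession. Only debuggable
// builds are expected to accept setRpcClientDebug().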
1915 TEST(BinderRpc, Java) {
1916 bool expectDebuggable = false;
1917 #if defined(__ANDROID__)
1918 expectDebuggable = android::base::GetBoolProperty("ro.debuggable", false) &&
1919 android::base::GetProperty("ro.build.type", "") != "user";
1920 #else
1921     GTEST_SKIP() << "This test is only run on Android. Though it could technically run on host "
1922                     "via createRpcDelegateServiceManager() with a device attached, such a test "
1923                     "belongs in binderHostDeviceTest. Hence, just disable this test on host.";
1924 #endif // !__ANDROID__
1925 if constexpr (!kEnableKernelIpc) {
1926 GTEST_SKIP() << "Test disabled because Binder kernel driver was disabled "
1927 "at build time.";
1928 }
1929
1930 sp<IServiceManager> sm = defaultServiceManager();
1931 ASSERT_NE(nullptr, sm);
1932 // Any Java service with non-empty getInterfaceDescriptor() would do.
1933 // Let's pick activity.
1934 auto binder = sm->checkService(String16(kKnownAidlService));
1935 ASSERT_NE(nullptr, binder);
1936 auto descriptor = binder->getInterfaceDescriptor();
1937     ASSERT_GT(descriptor.size(), 0u);
1938 ASSERT_EQ(OK, binder->pingBinder());
1939
1940 auto rpcServer = RpcServer::make();
1941 unsigned int port;
1942 ASSERT_EQ(OK, rpcServer->setupInetServer(kLocalInetAddress, 0, &port));
1943 auto socket = rpcServer->releaseServer();
1944
1945 auto keepAlive = sp<BBinder>::make();
1946 auto setRpcClientDebugStatus = binder->setRpcClientDebug(std::move(socket), keepAlive);
1947
1948 if (!expectDebuggable) {
1949 ASSERT_EQ(INVALID_OPERATION, setRpcClientDebugStatus)
1950 << "setRpcClientDebug should return INVALID_OPERATION on non-debuggable or user "
1951                    "builds, but got "
1952 << statusToString(setRpcClientDebugStatus);
1953 GTEST_SKIP();
1954 }
1955
1956 ASSERT_EQ(OK, setRpcClientDebugStatus);
1957
1958 auto rpcSession = RpcSession::make();
1959 ASSERT_EQ(OK, rpcSession->setupInetClient("127.0.0.1", port));
1960 auto rpcBinder = rpcSession->getRootObject();
1961 ASSERT_NE(nullptr, rpcBinder);
1962
1963 ASSERT_EQ(OK, rpcBinder->pingBinder());
1964
1965 ASSERT_EQ(descriptor, rpcBinder->getInterfaceDescriptor())
1966 << "getInterfaceDescriptor should not crash system_server";
1967 ASSERT_EQ(OK, rpcBinder->pingBinder());
1968 }
1969
1970 class BinderRpcServerOnly : public ::testing::TestWithParam<std::tuple<RpcSecurity, uint32_t>> {
1971 public:
1972     static std::string PrintTestParam(const ::testing::TestParamInfo<ParamType>& info) {
1973 return std::string(newTlsFactory(std::get<0>(info.param))->toCString()) + "_serverV" +
1974 std::to_string(std::get<1>(info.param));
1975 }
1976 };
1977
1978 TEST_P(BinderRpcServerOnly, SetExternalServerTest) {
1979 unique_fd sink(TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR)));
1980 int sinkFd = sink.get();
1981 auto server = RpcServer::make(newTlsFactory(std::get<0>(GetParam())));
1982 ASSERT_TRUE(server->setProtocolVersion(std::get<1>(GetParam())));
1983 ASSERT_FALSE(server->hasServer());
1984 ASSERT_EQ(OK, server->setupExternalServer(std::move(sink)));
1985 ASSERT_TRUE(server->hasServer());
1986 unique_fd retrieved = server->releaseServer();
1987 ASSERT_FALSE(server->hasServer());
1988 ASSERT_EQ(sinkFd, retrieved.get());
1989 }
1990
1991 TEST_P(BinderRpcServerOnly, Shutdown) {
1992 if constexpr (!kEnableRpcThreads) {
1993 GTEST_SKIP() << "Test skipped because threads were disabled at build time";
1994 }
1995
1996 auto addr = allocateSocketAddress();
1997 auto server = RpcServer::make(newTlsFactory(std::get<0>(GetParam())));
1998 ASSERT_TRUE(server->setProtocolVersion(std::get<1>(GetParam())));
1999 ASSERT_EQ(OK, server->setupUnixDomainServer(addr.c_str()));
2000 auto joinEnds = std::make_shared<OneOffSignal>();
2001
2002 // If things are broken and the thread never stops, don't block other tests. Because the thread
2003 // may run after the test finishes, it must not access the stack memory of the test. Hence,
2004 // shared pointers are passed.
2005 std::thread([server, joinEnds] {
2006 server->join();
2007 joinEnds->notify();
2008 }).detach();
2009
2010 bool shutdown = false;
2011 for (int i = 0; i < 10 && !shutdown; i++) {
2012 usleep(30 * 1000); // 30ms; total 300ms
2013 if (server->shutdown()) shutdown = true;
2014 }
2015 ASSERT_TRUE(shutdown) << "server->shutdown() never returns true";
2016
2017 ASSERT_TRUE(joinEnds->wait(2s))
2018 << "After server->shutdown() returns true, join() did not stop after 2s";
2019 }
2020
2021 INSTANTIATE_TEST_SUITE_P(BinderRpc, BinderRpcServerOnly,
2022 ::testing::Combine(::testing::ValuesIn(RpcSecurityValues()),
2023 ::testing::ValuesIn(testVersions())),
2024 BinderRpcServerOnly::PrintTestParam);
2025
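// Utilities that exercise RpcTransportCtx/RpcTransport directly, below the RpcSession layer.
// The Server half uses RpcServer only to set up the listening socket, then accepts and handshakes
// connections itself; the Client half connects and handshakes without an RpcSession.
// Typical usage (see GoodCertificate below):
//   auto server = std::make_unique<Server>();  server->setUp(param);  server->start();
//   Client client(server->getConnectToServerFn());  client.setUp(param);  client.run();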
2026 class RpcTransportTestUtils {
2027 public:
2028     // Parameterized only by server version because `RpcSession` is bypassed
2029 // in the client half of the tests.
2030 using Param =
2031 std::tuple<SocketType, RpcSecurity, std::optional<RpcCertificateFormat>, uint32_t>;
2032 using ConnectToServer = std::function<unique_fd()>;
2033
2034 // A server that handles client socket connections.
2035 class Server {
2036 public:
2037 using AcceptConnection = std::function<unique_fd(Server*)>;
2038
2039         explicit Server() {}
2040 Server(Server&&) = default;
2041         ~Server() { shutdownAndWait(); }
2042         [[nodiscard]] AssertionResult setUp(
2043 const Param& param,
2044 std::unique_ptr<RpcAuth> auth = std::make_unique<RpcAuthSelfSigned>()) {
2045 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = param;
2046 auto rpcServer = RpcServer::make(newTlsFactory(rpcSecurity));
2047 if (!rpcServer->setProtocolVersion(serverVersion)) {
2048 return AssertionFailure() << "Invalid protocol version: " << serverVersion;
2049 }
2050 switch (socketType) {
2051 case SocketType::PRECONNECTED: {
2052 return AssertionFailure() << "Not supported by this test";
2053 } break;
2054 case SocketType::UNIX: {
2055 auto addr = allocateSocketAddress();
2056 auto status = rpcServer->setupUnixDomainServer(addr.c_str());
2057 if (status != OK) {
2058 return AssertionFailure()
2059 << "setupUnixDomainServer: " << statusToString(status);
2060 }
2061 mConnectToServer = [addr] {
2062 return connectTo(UnixSocketAddress(addr.c_str()));
2063 };
2064 } break;
2065 case SocketType::UNIX_BOOTSTRAP: {
2066 unique_fd bootstrapFdClient, bootstrapFdServer;
2067 if (!binder::Socketpair(SOCK_STREAM, &bootstrapFdClient, &bootstrapFdServer)) {
2068 return AssertionFailure() << "Socketpair() failed";
2069 }
2070 auto status = rpcServer->setupUnixDomainSocketBootstrapServer(
2071 std::move(bootstrapFdServer));
2072 if (status != OK) {
2073 return AssertionFailure() << "setupUnixDomainSocketBootstrapServer: "
2074 << statusToString(status);
2075 }
2076 mBootstrapSocket = RpcTransportFd(std::move(bootstrapFdClient));
2077 mAcceptConnection = &Server::recvmsgServerConnection;
2078 mConnectToServer = [this] { return connectToUnixBootstrap(mBootstrapSocket); };
2079 } break;
2080 case SocketType::UNIX_RAW: {
2081 auto addr = allocateSocketAddress();
2082 auto status = rpcServer->setupRawSocketServer(initUnixSocket(addr));
2083 if (status != OK) {
2084 return AssertionFailure()
2085 << "setupRawSocketServer: " << statusToString(status);
2086 }
2087 mConnectToServer = [addr] {
2088 return connectTo(UnixSocketAddress(addr.c_str()));
2089 };
2090 } break;
2091 case SocketType::VSOCK: {
2092 unsigned port;
2093 auto status =
2094 rpcServer->setupVsockServer(VMADDR_CID_LOCAL, VMADDR_PORT_ANY, &port);
2095 if (status != OK) {
2096 return AssertionFailure() << "setupVsockServer: " << statusToString(status);
2097 }
2098 mConnectToServer = [port] {
2099 return connectTo(VsockSocketAddress(VMADDR_CID_LOCAL, port));
2100 };
2101 } break;
2102 case SocketType::INET: {
2103 unsigned int port;
2104 auto status = rpcServer->setupInetServer(kLocalInetAddress, 0, &port);
2105 if (status != OK) {
2106 return AssertionFailure() << "setupInetServer: " << statusToString(status);
2107 }
2108 mConnectToServer = [port] {
2109 const char* addr = kLocalInetAddress;
2110 auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
2111 if (aiStart == nullptr) return unique_fd{};
2112 for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
2113 auto fd = connectTo(
2114 InetSocketAddress(ai->ai_addr, ai->ai_addrlen, addr, port));
2115 if (fd.ok()) return fd;
2116 }
2117 ALOGE("None of the socket address resolved for %s:%u can be connected",
2118 addr, port);
2119 return unique_fd{};
2120 };
2121 } break;
2122 case SocketType::TIPC: {
2123 LOG_ALWAYS_FATAL("RpcTransportTest should not be enabled for TIPC");
2124 } break;
2125 }
2126 mFd = rpcServer->releaseServer();
2127 if (!mFd.fd.ok()) return AssertionFailure() << "releaseServer returns invalid fd";
2128 mCtx = newTlsFactory(rpcSecurity, mCertVerifier, std::move(auth))->newServerCtx();
2129 if (mCtx == nullptr) return AssertionFailure() << "newServerCtx";
2130 mSetup = true;
2131 return AssertionSuccess();
2132 }
2133         RpcTransportCtx* getCtx() const { return mCtx.get(); }
2134         std::shared_ptr<RpcCertificateVerifierSimple> getCertVerifier() const {
2135 return mCertVerifier;
2136 }
2137         ConnectToServer getConnectToServerFn() { return mConnectToServer; }
2138         void start() {
2139 LOG_ALWAYS_FATAL_IF(!mSetup, "Call Server::setup first!");
2140 mThread = std::make_unique<std::thread>(&Server::run, this);
2141 }
2142
2143         unique_fd acceptServerConnection() {
2144 return unique_fd(TEMP_FAILURE_RETRY(
2145 accept4(mFd.fd.get(), nullptr, nullptr, SOCK_CLOEXEC | SOCK_NONBLOCK)));
2146 }
2147
2148         unique_fd recvmsgServerConnection() {
2149 std::vector<std::variant<unique_fd, borrowed_fd>> fds;
2150 int buf;
2151 iovec iov{&buf, sizeof(buf)};
2152
2153 if (binder::os::receiveMessageFromSocket(mFd, &iov, 1, &fds) < 0) {
2154 PLOGF("Failed receiveMessage");
2155 }
2156 LOG_ALWAYS_FATAL_IF(fds.size() != 1, "Expected one FD from receiveMessage(), got %zu",
2157 fds.size());
2158 return std::move(std::get<unique_fd>(fds[0]));
2159 }
2160
2161         void run() {
2162 LOG_ALWAYS_FATAL_IF(!mSetup, "Call Server::setup first!");
2163
2164 std::vector<std::thread> threads;
2165 while (OK == mFdTrigger->triggerablePoll(mFd, POLLIN)) {
2166 unique_fd acceptedFd = mAcceptConnection(this);
2167 threads.emplace_back(&Server::handleOne, this, std::move(acceptedFd));
2168 }
2169
2170 for (auto& thread : threads) thread.join();
2171 }
2172         void handleOne(unique_fd acceptedFd) {
2173 ASSERT_TRUE(acceptedFd.ok());
2174 RpcTransportFd transportFd(std::move(acceptedFd));
2175 auto serverTransport = mCtx->newTransport(std::move(transportFd), mFdTrigger.get());
2176 if (serverTransport == nullptr) return; // handshake failed
2177 ASSERT_TRUE(mPostConnect(serverTransport.get(), mFdTrigger.get()));
2178 }
2179         void shutdownAndWait() {
2180 shutdown();
2181 join();
2182 }
2183         void shutdown() { mFdTrigger->trigger(); }
2184
2185         void setPostConnect(
2186 std::function<AssertionResult(RpcTransport*, FdTrigger* fdTrigger)> fn) {
2187 mPostConnect = std::move(fn);
2188 }
2189
2190 private:
2191 std::unique_ptr<std::thread> mThread;
2192 ConnectToServer mConnectToServer;
2193 AcceptConnection mAcceptConnection = &Server::acceptServerConnection;
2194 std::unique_ptr<FdTrigger> mFdTrigger = FdTrigger::make();
2195 RpcTransportFd mFd, mBootstrapSocket;
2196 std::unique_ptr<RpcTransportCtx> mCtx;
2197 std::shared_ptr<RpcCertificateVerifierSimple> mCertVerifier =
2198 std::make_shared<RpcCertificateVerifierSimple>();
2199 bool mSetup = false;
2200 // The function invoked after connection and handshake. By default, it is
2201 // |defaultPostConnect| that sends |kMessage| to the client.
2202 std::function<AssertionResult(RpcTransport*, FdTrigger* fdTrigger)> mPostConnect =
2203 Server::defaultPostConnect;
2204
2205         void join() {
2206 if (mThread != nullptr) {
2207 mThread->join();
2208 mThread = nullptr;
2209 }
2210 }
2211
2212         static AssertionResult defaultPostConnect(RpcTransport* serverTransport,
2213 FdTrigger* fdTrigger) {
2214 std::string message(kMessage);
2215 iovec messageIov{message.data(), message.size()};
2216 auto status = serverTransport->interruptableWriteFully(fdTrigger, &messageIov, 1,
2217 std::nullopt, nullptr);
2218 if (status != OK) return AssertionFailure() << statusToString(status);
2219 return AssertionSuccess();
2220 }
2221 };
2222
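    // A client that connects to the Server above using the supplied ConnectToServer callback,
    // performs the transport handshake, and optionally reads the server's greeting message.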
2223 class Client {
2224 public:
2225         explicit Client(ConnectToServer connectToServer) : mConnectToServer(connectToServer) {}
2226 Client(Client&&) = default;
2227         [[nodiscard]] AssertionResult setUp(const Param& param) {
2228 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = param;
2229 (void)serverVersion;
2230 mFdTrigger = FdTrigger::make();
2231 mCtx = newTlsFactory(rpcSecurity, mCertVerifier)->newClientCtx();
2232 if (mCtx == nullptr) return AssertionFailure() << "newClientCtx";
2233 return AssertionSuccess();
2234 }
2235         RpcTransportCtx* getCtx() const { return mCtx.get(); }
2236         std::shared_ptr<RpcCertificateVerifierSimple> getCertVerifier() const {
2237 return mCertVerifier;
2238 }
2239 // connect() and do handshake
2240         bool setUpTransport() {
2241 mFd = mConnectToServer();
2242 if (!mFd.fd.ok()) return AssertionFailure() << "Cannot connect to server";
2243 mClientTransport = mCtx->newTransport(std::move(mFd), mFdTrigger.get());
2244 return mClientTransport != nullptr;
2245 }
2246         AssertionResult readMessage(const std::string& expectedMessage = kMessage) {
2247 LOG_ALWAYS_FATAL_IF(mClientTransport == nullptr, "setUpTransport not called or failed");
2248 std::string readMessage(expectedMessage.size(), '\0');
2249 iovec readMessageIov{readMessage.data(), readMessage.size()};
2250 status_t readStatus =
2251 mClientTransport->interruptableReadFully(mFdTrigger.get(), &readMessageIov, 1,
2252 std::nullopt, nullptr);
2253 if (readStatus != OK) {
2254 return AssertionFailure() << statusToString(readStatus);
2255 }
2256 if (readMessage != expectedMessage) {
2257 return AssertionFailure()
2258 << "Expected " << expectedMessage << ", actual " << readMessage;
2259 }
2260 return AssertionSuccess();
2261 }
2262         void run(bool handshakeOk = true, bool readOk = true) {
2263 if (!setUpTransport()) {
2264 ASSERT_FALSE(handshakeOk) << "newTransport returns nullptr, but it shouldn't";
2265 return;
2266 }
2267 ASSERT_TRUE(handshakeOk) << "newTransport does not return nullptr, but it should";
2268 ASSERT_EQ(readOk, readMessage());
2269 }
2270
2271         bool isTransportWaiting() { return mClientTransport->isWaiting(); }
2272
2273 private:
2274 ConnectToServer mConnectToServer;
2275 RpcTransportFd mFd;
2276 std::unique_ptr<FdTrigger> mFdTrigger = FdTrigger::make();
2277 std::unique_ptr<RpcTransportCtx> mCtx;
2278 std::shared_ptr<RpcCertificateVerifierSimple> mCertVerifier =
2279 std::make_shared<RpcCertificateVerifierSimple>();
2280 std::unique_ptr<RpcTransport> mClientTransport;
2281 };
2282
2283 // Make A trust B.
2284 template <typename A, typename B>
2285     static status_t trust(RpcSecurity rpcSecurity,
2286 std::optional<RpcCertificateFormat> certificateFormat, const A& a,
2287 const B& b) {
2288 if (rpcSecurity != RpcSecurity::TLS) return OK;
2289 LOG_ALWAYS_FATAL_IF(!certificateFormat.has_value());
2290 auto bCert = b->getCtx()->getCertificate(*certificateFormat);
2291 return a->getCertVerifier()->addTrustedPeerCertificate(*certificateFormat, bCert);
2292 }
2293
2294 static constexpr const char* kMessage = "hello";
2295 };
2296
2297 class RpcTransportTest : public testing::TestWithParam<RpcTransportTestUtils::Param> {
2298 public:
2299 using Server = RpcTransportTestUtils::Server;
2300 using Client = RpcTransportTestUtils::Client;
2301     static inline std::string PrintParamInfo(const testing::TestParamInfo<ParamType>& info) {
2302 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = info.param;
2303 auto ret = PrintToString(socketType) + "_" + newTlsFactory(rpcSecurity)->toCString();
2304 if (certificateFormat.has_value()) ret += "_" + PrintToString(*certificateFormat);
2305 ret += "_serverV" + std::to_string(serverVersion);
2306 return ret;
2307 }
2308     static std::vector<ParamType> getRpcTranportTestParams() {
2309 std::vector<ParamType> ret;
2310 for (auto serverVersion : testVersions()) {
2311 for (auto socketType : testSocketTypes(false /* hasPreconnected */)) {
2312 for (auto rpcSecurity : RpcSecurityValues()) {
2313 switch (rpcSecurity) {
2314 case RpcSecurity::RAW: {
2315 ret.emplace_back(socketType, rpcSecurity, std::nullopt, serverVersion);
2316 } break;
2317 case RpcSecurity::TLS: {
2318 ret.emplace_back(socketType, rpcSecurity, RpcCertificateFormat::PEM,
2319 serverVersion);
2320 ret.emplace_back(socketType, rpcSecurity, RpcCertificateFormat::DER,
2321 serverVersion);
2322 } break;
2323 }
2324 }
2325 }
2326 }
2327 return ret;
2328 }
2329 template <typename A, typename B>
2330     status_t trust(const A& a, const B& b) {
2331 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = GetParam();
2332 (void)serverVersion;
2333 return RpcTransportTestUtils::trust(rpcSecurity, certificateFormat, a, b);
2334 }
2335     void SetUp() override {
2336 if constexpr (!kEnableRpcThreads) {
2337 GTEST_SKIP() << "Test skipped because threads were disabled at build time";
2338 }
2339 }
2340 };
2341
2342 TEST_P(RpcTransportTest, GoodCertificate) {
2343 auto server = std::make_unique<Server>();
2344 ASSERT_TRUE(server->setUp(GetParam()));
2345
2346 Client client(server->getConnectToServerFn());
2347 ASSERT_TRUE(client.setUp(GetParam()));
2348
2349 ASSERT_EQ(OK, trust(&client, server));
2350 ASSERT_EQ(OK, trust(server, &client));
2351
2352 server->start();
2353 client.run();
2354 }
2355
2356 TEST_P(RpcTransportTest, MultipleClients) {
2357 auto server = std::make_unique<Server>();
2358 ASSERT_TRUE(server->setUp(GetParam()));
2359
2360 std::vector<Client> clients;
2361 for (int i = 0; i < 2; i++) {
2362 auto& client = clients.emplace_back(server->getConnectToServerFn());
2363 ASSERT_TRUE(client.setUp(GetParam()));
2364 ASSERT_EQ(OK, trust(&client, server));
2365 ASSERT_EQ(OK, trust(server, &client));
2366 }
2367
2368 server->start();
2369 for (auto& client : clients) client.run();
2370 }
2371
2372 TEST_P(RpcTransportTest, UntrustedServer) {
2373 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = GetParam();
2374 (void)serverVersion;
2375
2376 auto untrustedServer = std::make_unique<Server>();
2377 ASSERT_TRUE(untrustedServer->setUp(GetParam()));
2378
2379 Client client(untrustedServer->getConnectToServerFn());
2380 ASSERT_TRUE(client.setUp(GetParam()));
2381
2382 ASSERT_EQ(OK, trust(untrustedServer, &client));
2383
2384 untrustedServer->start();
2385
2386 // For TLS, this should reject the certificate. For RAW sockets, it should pass because
2387 // the client can't verify the server's identity.
2388 bool handshakeOk = rpcSecurity != RpcSecurity::TLS;
2389 client.run(handshakeOk);
2390 }
2391 TEST_P(RpcTransportTest, MaliciousServer) {
2392 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = GetParam();
2393 (void)serverVersion;
2394
2395 auto validServer = std::make_unique<Server>();
2396 ASSERT_TRUE(validServer->setUp(GetParam()));
2397
2398 auto maliciousServer = std::make_unique<Server>();
2399 ASSERT_TRUE(maliciousServer->setUp(GetParam()));
2400
2401 Client client(maliciousServer->getConnectToServerFn());
2402 ASSERT_TRUE(client.setUp(GetParam()));
2403
2404 ASSERT_EQ(OK, trust(&client, validServer));
2405 ASSERT_EQ(OK, trust(validServer, &client));
2406 ASSERT_EQ(OK, trust(maliciousServer, &client));
2407
2408 maliciousServer->start();
2409
2410 // For TLS, this should reject the certificate. For RAW sockets, it should pass because
2411 // the client can't verify the server's identity.
2412 bool handshakeOk = rpcSecurity != RpcSecurity::TLS;
2413 client.run(handshakeOk);
2414 }
2415
2416 TEST_P(RpcTransportTest, UntrustedClient) {
2417 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = GetParam();
2418 (void)serverVersion;
2419
2420 auto server = std::make_unique<Server>();
2421 ASSERT_TRUE(server->setUp(GetParam()));
2422
2423 Client client(server->getConnectToServerFn());
2424 ASSERT_TRUE(client.setUp(GetParam()));
2425
2426 ASSERT_EQ(OK, trust(&client, server));
2427
2428 server->start();
2429
2430 // For TLS, Client should be able to verify server's identity, so client should see
2431 // do_handshake() successfully executed. However, server shouldn't be able to verify client's
2432 // identity and should drop the connection, so client shouldn't be able to read anything.
2433 bool readOk = rpcSecurity != RpcSecurity::TLS;
2434 client.run(true, readOk);
2435 }
2436
2437 TEST_P(RpcTransportTest, MaliciousClient) {
2438 auto [socketType, rpcSecurity, certificateFormat, serverVersion] = GetParam();
2439 (void)serverVersion;
2440
2441 auto server = std::make_unique<Server>();
2442 ASSERT_TRUE(server->setUp(GetParam()));
2443
2444 Client validClient(server->getConnectToServerFn());
2445 ASSERT_TRUE(validClient.setUp(GetParam()));
2446 Client maliciousClient(server->getConnectToServerFn());
2447 ASSERT_TRUE(maliciousClient.setUp(GetParam()));
2448
2449 ASSERT_EQ(OK, trust(&validClient, server));
2450 ASSERT_EQ(OK, trust(&maliciousClient, server));
2451
2452 server->start();
2453
2454 // See UntrustedClient.
2455 bool readOk = rpcSecurity != RpcSecurity::TLS;
2456 maliciousClient.run(true, readOk);
2457 }
2458
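// Verify that triggering the server's FdTrigger interrupts a pending write: after shutdown(), the
// server's second interruptableWriteFully() should fail with DEAD_OBJECT, and the client's
// matching read should fail as well.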
2459 TEST_P(RpcTransportTest, Trigger) {
2460 std::string msg2 = ", world!";
2461 std::mutex writeMutex;
2462 std::condition_variable writeCv;
2463 bool shouldContinueWriting = false;
2464 auto serverPostConnect = [&](RpcTransport* serverTransport, FdTrigger* fdTrigger) {
2465 std::string message(RpcTransportTestUtils::kMessage);
2466 iovec messageIov{message.data(), message.size()};
2467 auto status = serverTransport->interruptableWriteFully(fdTrigger, &messageIov, 1,
2468 std::nullopt, nullptr);
2469 if (status != OK) return AssertionFailure() << statusToString(status);
2470
2471 {
2472 std::unique_lock<std::mutex> lock(writeMutex);
2473 if (!writeCv.wait_for(lock, 3s, [&] { return shouldContinueWriting; })) {
2474 return AssertionFailure() << "write barrier not cleared in time!";
2475 }
2476 }
2477
2478 iovec msg2Iov{msg2.data(), msg2.size()};
2479 status = serverTransport->interruptableWriteFully(fdTrigger, &msg2Iov, 1, std::nullopt,
2480 nullptr);
2481 if (status != DEAD_OBJECT)
2482 return AssertionFailure() << "When FdTrigger is shut down, interruptableWriteFully "
2483 "should return DEAD_OBJECT, but it is "
2484 << statusToString(status);
2485 return AssertionSuccess();
2486 };
2487
2488 auto server = std::make_unique<Server>();
2489 ASSERT_TRUE(server->setUp(GetParam()));
2490
2491 // Set up client
2492 Client client(server->getConnectToServerFn());
2493 ASSERT_TRUE(client.setUp(GetParam()));
2494
2495 // Exchange keys
2496 ASSERT_EQ(OK, trust(&client, server));
2497 ASSERT_EQ(OK, trust(server, &client));
2498
2499 server->setPostConnect(serverPostConnect);
2500
2501 server->start();
2502 // connect() to server and do handshake
2503 ASSERT_TRUE(client.setUpTransport());
2504     // Read the first message. This ensures that the server has finished the handshake and has
2505     // started handling the client fd. The server thread should now pause at writeCv.wait_for().
2506 ASSERT_TRUE(client.readMessage(RpcTransportTestUtils::kMessage));
2507 // Trigger server shutdown after server starts handling client FD. This ensures that the second
2508 // write is on an FdTrigger that has been shut down.
2509 server->shutdown();
2510     // Let the server thread continue and write the second message.
2511 {
2512 std::lock_guard<std::mutex> lock(writeMutex);
2513 shouldContinueWriting = true;
2514 }
2515 writeCv.notify_all();
2516     // After this line, the server thread unblocks and attempts to write the second message, but
2517     // shutdown has already been triggered, so the write should fail with DEAD_OBJECT. See
2518     // |serverPostConnect|. On the client side, the second read fails with DEAD_OBJECT.
2519 ASSERT_FALSE(client.readMessage(msg2));
2520 }
2521
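// Verify RpcTransport::isWaiting(): the transport should only report that it is waiting while a
// blocking read is actually in progress, so it reads false both before and after readMessage().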
2522 TEST_P(RpcTransportTest, CheckWaitingForRead) {
2523 std::mutex readMutex;
2524 std::condition_variable readCv;
2525 bool shouldContinueReading = false;
2526     // Server will write data on the transport once it's started.
2527 auto serverPostConnect = [&](RpcTransport* serverTransport, FdTrigger* fdTrigger) {
2528 std::string message(RpcTransportTestUtils::kMessage);
2529 iovec messageIov{message.data(), message.size()};
2530 auto status = serverTransport->interruptableWriteFully(fdTrigger, &messageIov, 1,
2531 std::nullopt, nullptr);
2532 if (status != OK) return AssertionFailure() << statusToString(status);
2533
2534 {
2535 std::unique_lock<std::mutex> lock(readMutex);
2536 shouldContinueReading = true;
2537 lock.unlock();
2538 readCv.notify_all();
2539 }
2540 return AssertionSuccess();
2541 };
2542
2543 // Setup Server and client
2544 auto server = std::make_unique<Server>();
2545 ASSERT_TRUE(server->setUp(GetParam()));
2546
2547 Client client(server->getConnectToServerFn());
2548 ASSERT_TRUE(client.setUp(GetParam()));
2549
2550 ASSERT_EQ(OK, trust(&client, server));
2551 ASSERT_EQ(OK, trust(server, &client));
2552 server->setPostConnect(serverPostConnect);
2553
2554 server->start();
2555 ASSERT_TRUE(client.setUpTransport());
2556 {
2557 // Wait till server writes data
2558 std::unique_lock<std::mutex> lock(readMutex);
2559 ASSERT_TRUE(readCv.wait_for(lock, 3s, [&] { return shouldContinueReading; }));
2560 }
2561
2562     // No read is in progress yet, so the transport should not be in the waiting state.
2563 ASSERT_FALSE(client.isTransportWaiting());
2564 ASSERT_TRUE(client.readMessage(RpcTransportTestUtils::kMessage));
2565     // readMessage() increments the polling count, reads, and decrements the count again,
2566     // so by the time it returns the transport is no longer waiting.
2567 ASSERT_FALSE(client.isTransportWaiting());
2568
2569 server->shutdown();
2570 }
2571
2572 INSTANTIATE_TEST_SUITE_P(BinderRpc, RpcTransportTest,
2573 ::testing::ValuesIn(RpcTransportTest::getRpcTranportTestParams()),
2574 RpcTransportTest::PrintParamInfo);
2575
2576 class RpcTransportTlsKeyTest
2577 : public testing::TestWithParam<
2578 std::tuple<SocketType, RpcCertificateFormat, RpcKeyFormat, uint32_t>> {
2579 public:
2580 template <typename A, typename B>
2581     status_t trust(const A& a, const B& b) {
2582 auto [socketType, certificateFormat, keyFormat, serverVersion] = GetParam();
2583 (void)serverVersion;
2584 return RpcTransportTestUtils::trust(RpcSecurity::TLS, certificateFormat, a, b);
2585 }
2586     static std::string PrintParamInfo(const testing::TestParamInfo<ParamType>& info) {
2587 auto [socketType, certificateFormat, keyFormat, serverVersion] = info.param;
2588 return PrintToString(socketType) + "_certificate_" + PrintToString(certificateFormat) +
2589 "_key_" + PrintToString(keyFormat) + "_serverV" + std::to_string(serverVersion);
2590 };
2591 };
2592
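// Exercise RpcAuthPreSigned: generate a key pair and self-signed certificate, round-trip them
// through the parameterized serialization formats, and use the deserialized pair for the TLS
// handshake.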
2593 TEST_P(RpcTransportTlsKeyTest, PreSignedCertificate) {
2594 if constexpr (!kEnableRpcThreads) {
2595 GTEST_SKIP() << "Test skipped because threads were disabled at build time";
2596 }
2597
2598 auto [socketType, certificateFormat, keyFormat, serverVersion] = GetParam();
2599
2600 std::vector<uint8_t> pkeyData, certData;
2601 {
2602 auto pkey = makeKeyPairForSelfSignedCert();
2603 ASSERT_NE(nullptr, pkey);
2604 auto cert = makeSelfSignedCert(pkey.get(), kCertValidSeconds);
2605 ASSERT_NE(nullptr, cert);
2606 pkeyData = serializeUnencryptedPrivatekey(pkey.get(), keyFormat);
2607 certData = serializeCertificate(cert.get(), certificateFormat);
2608 }
2609
2610 auto desPkey = deserializeUnencryptedPrivatekey(pkeyData, keyFormat);
2611 auto desCert = deserializeCertificate(certData, certificateFormat);
2612 auto auth = std::make_unique<RpcAuthPreSigned>(std::move(desPkey), std::move(desCert));
2613 auto utilsParam = std::make_tuple(socketType, RpcSecurity::TLS,
2614 std::make_optional(certificateFormat), serverVersion);
2615
2616 auto server = std::make_unique<RpcTransportTestUtils::Server>();
2617 ASSERT_TRUE(server->setUp(utilsParam, std::move(auth)));
2618
2619 RpcTransportTestUtils::Client client(server->getConnectToServerFn());
2620 ASSERT_TRUE(client.setUp(utilsParam));
2621
2622 ASSERT_EQ(OK, trust(&client, server));
2623 ASSERT_EQ(OK, trust(server, &client));
2624
2625 server->start();
2626 client.run();
2627 }
2628
2629 INSTANTIATE_TEST_SUITE_P(
2630 BinderRpc, RpcTransportTlsKeyTest,
2631 testing::Combine(testing::ValuesIn(testSocketTypes(false /* hasPreconnected*/)),
2632 testing::Values(RpcCertificateFormat::PEM, RpcCertificateFormat::DER),
2633 testing::Values(RpcKeyFormat::PEM, RpcKeyFormat::DER),
2634 testing::ValuesIn(testVersions())),
2635 RpcTransportTlsKeyTest::PrintParamInfo);
2636 #endif // BINDER_RPC_TO_TRUSTY_TEST
2637
2638 } // namespace android
2639
2640 int main(int argc, char** argv) {
2641 ::testing::InitGoogleTest(&argc, argv);
2642 __android_log_set_logger(__android_log_stderr_logger);
2643
2644 return RUN_ALL_TESTS();
2645 }
2646