xref: /aosp_15_r20/external/grpc-grpc/test/core/gpr/cpu_test.cc (revision cc02d7e222339f7a4f6ba5f422e6413f4bd931f2)
1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 //     http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18 
19 // Test gpr per-cpu support:
20 // gpr_cpu_num_cores()
21 // gpr_cpu_current_cpu()
22 //
23 
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <string.h>
27 
28 #include <memory>
29 
30 #include "gtest/gtest.h"
31 
32 #include <grpc/support/alloc.h>
33 #include <grpc/support/cpu.h>
34 #include <grpc/support/sync.h>
35 #include <grpc/support/time.h>
36 
37 #include "src/core/lib/gprpp/thd.h"
38 #include "test/core/util/test_config.h"
39 
40 // Test structure is essentially:
41 // 1) Figure out how many cores are present on the test system
42 // 2) Create 3 times that many threads
43 // 3) Have each thread do some amount of work (basically want to
44 //    guarantee that all threads are running at once, and enough of them
45 //    to run on all cores).
46 // 4) Each thread checks what core it is running on, and marks that core
47 //    as "used" in the test.
48 // 5) Count number of "used" cores.
49 
50 // The test will fail if:
51 // 1) gpr_cpu_num_cores() == 0
52 // 2) Any result from gpr_cpu_current_cpu() >= gpr_cpu_num_cores()
53 // 3) Ideally, we would fail if not all cores were seen as used. Unfortunately,
53 //    this is only probabilistically true, and depends on the OS, its
55 //    scheduler, etc. So we just print out an indication of how many were seen;
56 //    hopefully developers can use this to sanity check their system.
57 //
58 
// Status shared across all worker threads and the main thread.
// All mutable fields are guarded by `mu`; the main thread blocks on
// `done_cv` until the last worker flips `is_done`.
struct cpu_test {
  gpr_mu mu;        // guards every mutable field below
  int nthreads;     // workers still running; reaching 0 means all done
  uint32_t ncores;  // gpr_cpu_num_cores(), set once before threads start
  int is_done;      // set to 1 (under mu) when nthreads hits 0
  gpr_cv done_cv;   // signalled exactly once, when is_done becomes 1
  int* used;   // is this core used? used[c] == 1 iff some worker ran on c
  unsigned r;  // random number — scratch sink so busy-work isn't elided
};
69 
worker_thread(void * arg)70 static void worker_thread(void* arg) {
71   struct cpu_test* ct = static_cast<struct cpu_test*>(arg);
72   uint32_t cpu;
73   unsigned r = 12345678;
74   unsigned i, j;
75   // Avoid repetitive division calculations
76   int64_t max_i = 1000 / grpc_test_slowdown_factor();
77   int64_t max_j = 1000 / grpc_test_slowdown_factor();
78   for (i = 0; i < max_i; i++) {
79     // run for a bit - just calculate something random.
80     for (j = 0; j < max_j; j++) {
81       r = (r * 17) & ((r - i) | (r * i));
82     }
83     cpu = gpr_cpu_current_cpu();
84     ASSERT_LT(cpu, ct->ncores);
85     gpr_mu_lock(&ct->mu);
86     ct->used[cpu] = 1;
87     for (j = 0; j < ct->ncores; j++) {
88       if (!ct->used[j]) break;
89     }
90     gpr_mu_unlock(&ct->mu);
91     if (j == ct->ncores) {
92       break;  // all cpus have been used - no further use in running this test
93     }
94   }
95   gpr_mu_lock(&ct->mu);
96   ct->r = r;  // make it look like we care about r's value...
97   ct->nthreads--;
98   if (ct->nthreads == 0) {
99     ct->is_done = 1;
100     gpr_cv_signal(&ct->done_cv);
101   }
102   gpr_mu_unlock(&ct->mu);
103 }
104 
cpu_test(void)105 static void cpu_test(void) {
106   uint32_t i;
107   int cores_seen = 0;
108   struct cpu_test ct;
109   ct.ncores = gpr_cpu_num_cores();
110   ASSERT_GT(ct.ncores, 0);
111   ct.nthreads = static_cast<int>(ct.ncores) * 3;
112   ct.used = static_cast<int*>(gpr_malloc(ct.ncores * sizeof(int)));
113   memset(ct.used, 0, ct.ncores * sizeof(int));
114   gpr_mu_init(&ct.mu);
115   gpr_cv_init(&ct.done_cv);
116   ct.is_done = 0;
117 
118   uint32_t nthreads = ct.ncores * 3;
119   grpc_core::Thread* thd =
120       static_cast<grpc_core::Thread*>(gpr_malloc(sizeof(*thd) * nthreads));
121 
122   for (i = 0; i < nthreads; i++) {
123     thd[i] = grpc_core::Thread("grpc_cpu_test", &worker_thread, &ct);
124     thd[i].Start();
125   }
126   gpr_mu_lock(&ct.mu);
127   while (!ct.is_done) {
128     gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
129   }
130   gpr_mu_unlock(&ct.mu);
131   for (i = 0; i < nthreads; i++) {
132     thd[i].Join();
133   }
134   gpr_free(thd);
135   fprintf(stderr, "Saw cores [");
136   fflush(stderr);
137   for (i = 0; i < ct.ncores; i++) {
138     if (ct.used[i]) {
139       fprintf(stderr, "%d,", i);
140       fflush(stderr);
141       cores_seen++;
142     }
143   }
144   fprintf(stderr, "] (%d/%d)\n", cores_seen, ct.ncores);
145   fflush(stderr);
146   gpr_mu_destroy(&ct.mu);
147   gpr_cv_destroy(&ct.done_cv);
148   gpr_free(ct.used);
149 }
150 
// gtest entry point; all the actual work happens in cpu_test() above.
TEST(CpuTest, MainTest) { cpu_test(); }
152 
// Standard gRPC test main: initialize the test environment (parses
// test-config flags such as the slowdown factor), then hand control to gtest.
int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
158