/*
   american fuzzy lop++ - free CPU gizmo
   -----------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <[email protected]>,
   Heiko Eißfeldt <[email protected]> and
   Andrea Fioraldi <[email protected]>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2024 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0

   This tool provides a fairly accurate measurement of CPU preemption rate.
   It is meant to complement the quick-and-dirty load average widget shown
   in the afl-fuzz UI. See docs/fuzzing_in_depth.md#c-using-multiple-cores
   for more info.

   For some work loads, the tool may actually suggest running more instances
   than you have CPU cores. This can happen if the tested program is spending
   a portion of its run time waiting for I/O, rather than being 100%
   CPU-bound.

   The idea for the getrusage()-based approach comes from Jakub Wilk.

 */
33
#define AFL_MAIN
#ifndef _GNU_SOURCE
  #define _GNU_SOURCE
#endif

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/resource.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/wait.h>

#include "types.h"
#include "debug.h"
#include "common.h"

#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__APPLE__) || defined(__DragonFly__) || defined(__sun)
  #define HAVE_AFFINITY 1
  #if defined(__FreeBSD__) || defined(__DragonFly__)
    #include <pthread.h>
    #include <pthread_np.h>
    #if defined(__FreeBSD__)
      #include <sys/cpuset.h>
    #endif
    #define cpu_set_t cpuset_t
  #elif defined(__NetBSD__)
    #include <pthread.h>
  #elif defined(__APPLE__)
    #include <pthread.h>
    #include <mach/thread_act.h>
    #include <mach/thread_policy.h>
  #elif defined(__sun)
    #include <sys/pset.h>
  #endif
#endif /* __linux__ || __FreeBSD__ || __NetBSD__ || __APPLE__ */
74
75 /* Get CPU usage in microseconds. */
76
get_cpu_usage_us(void)77 static u64 get_cpu_usage_us(void) {
78
79 struct rusage u;
80
81 getrusage(RUSAGE_SELF, &u);
82
83 return (u.ru_utime.tv_sec * 1000000ULL) + u.ru_utime.tv_usec +
84 (u.ru_stime.tv_sec * 1000000ULL) + u.ru_stime.tv_usec;
85
86 }
87
88 /* Measure preemption rate. */
89
static u32 measure_preemption(u32 target_ms) {

  /* Spin in a busy loop for roughly target_ms of wall-clock time, yielding
     the CPU after every burst, then compare elapsed wall-clock time against
     the CPU time we were actually granted. Returns wall time as a percentage
     of CPU time: ~100% means we nearly always had the CPU; larger values
     mean we spent a proportionally longer time preempted. */

  volatile u32 v1, v2 = 0;  /* volatile: keep the busy loop from being
                               optimized away. */

  u64 st_t, en_t, st_c, en_c, real_delta, slice_delta;

  st_t = get_cur_time_us();
  st_c = get_cpu_usage_us();

repeat_loop:

  v1 = CTEST_BUSY_CYCLES;

  while (v1--) {

    v2++;

  }

  sched_yield();

  en_t = get_cur_time_us();

  /* Widen target_ms before multiplying so large targets cannot overflow
     u32 arithmetic. */

  if (en_t - st_t < (u64)target_ms * 1000) { goto repeat_loop; }

  /* Let's see what percentage of this time we actually had a chance to
     run, and how much time was spent in the penalty box. */

  en_c = get_cpu_usage_us();

  real_delta = (en_t - st_t) / 1000;
  slice_delta = (en_c - st_c) / 1000;

  /* Guard against division by zero if we were granted less than 1 ms of
     CPU time (possible on a massively oversubscribed machine). */

  if (!slice_delta) slice_delta = 1;

  return real_delta * 100 / slice_delta;

}
132
133 /* Do the benchmark thing. */
134
main(int argc,char ** argv)135 int main(int argc, char **argv) {
136
137 if (argc > 1) {
138
139 printf("afl-gotcpu" VERSION " by Michal Zalewski\n");
140 printf("\n%s \n\n", argv[0]);
141 printf("afl-gotcpu does not have command line options\n");
142 printf("afl-gotcpu prints out which CPUs are available\n");
143 return -1;
144
145 }
146
147 #ifdef HAVE_AFFINITY
148
149 u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i;
150
151 SAYF(cCYA "afl-gotcpu" VERSION cRST " by Michal Zalewski\n");
152
153 ACTF("Measuring per-core preemption rate (this will take %0.02f sec)...",
154 ((double)CTEST_CORE_TRG_MS) / 1000);
155
156 for (i = 0; i < cpu_cnt; i++) {
157
158 s32 fr = fork();
159
160 if (fr < 0) { PFATAL("fork failed"); }
161
162 if (!fr) {
163
164 u32 util_perc;
165 #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
166 cpu_set_t c;
167
168 CPU_ZERO(&c);
169 CPU_SET(i, &c);
170 #elif defined(__NetBSD__)
171 cpuset_t *c;
172
173 c = cpuset_create();
174 if (c == NULL) PFATAL("cpuset_create failed");
175
176 cpuset_set(i, c);
177 #elif defined(__APPLE__) && defined(__x86_64__)
178 // the api is not workable on arm64, core's principle
179 // differs significantly hive of core per type vs individual ones.
180 // Possible TODO: For arm64 is to slightly change the meaning
181 // of gotcpu since it makes no sense on this platform
182 // but rather just displaying current policy ?
183 thread_affinity_policy_data_t c = {i};
184 thread_port_t native_thread = pthread_mach_thread_np(pthread_self());
185 if (thread_policy_set(native_thread, THREAD_AFFINITY_POLICY,
186 (thread_policy_t)&c, 1) != KERN_SUCCESS)
187 PFATAL("thread_policy_set failed");
188 #elif defined(__sun)
189 psetid_t c;
190
191 if (pset_create(&c)) PFATAL("pset_create failed");
192
193 if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
194 #endif
195
196 #if defined(__FreeBSD__) || defined(__DragonFly__)
197 if (pthread_setaffinity_np(pthread_self(), sizeof(c), &c))
198 PFATAL("pthread_setaffinity_np failed");
199 #endif
200
201 #if defined(__NetBSD__)
202 if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c))
203 PFATAL("pthread_setaffinity_np failed");
204
205 cpuset_destroy(c);
206 #endif
207
208 #if defined(__sun)
209 if (pset_bind(c, P_PID, getpid(), NULL)) PFATAL("pset_bind failed");
210
211 pset_destroy(c);
212 #endif
213
214 #if defined(__linux__)
215 if (sched_setaffinity(0, sizeof(c), &c)) {
216
217 const char *error_code = "Unkown error code";
218 if (errno == EFAULT) error_code = "EFAULT";
219 if (errno == EINVAL) error_code = "EINVAL";
220 if (errno == EPERM) error_code = "EPERM";
221 if (errno == ESRCH) error_code = "ESRCH";
222
223 PFATAL("sched_setaffinity failed for cpu %d, error: %s", i, error_code);
224
225 }
226
227 #endif
228
229 util_perc = measure_preemption(CTEST_CORE_TRG_MS);
230
231 if (util_perc < 110) {
232
233 SAYF(" Core #%u: " cLGN "AVAILABLE" cRST "(%u%%)\n", i, util_perc);
234 exit(0);
235
236 } else if (util_perc < 250) {
237
238 SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc);
239 exit(1);
240
241 }
242
243 SAYF(" Core #%u: " cLRD "OVERBOOKED " cRST "(%u%%)\n" cRST, i,
244 util_perc);
245 exit(2);
246
247 }
248
249 }
250
251 for (i = 0; i < cpu_cnt; i++) {
252
253 int ret;
254 if (waitpid(-1, &ret, 0) < 0) { PFATAL("waitpid failed"); }
255
256 if (WEXITSTATUS(ret) == 0) { idle_cpus++; }
257 if (WEXITSTATUS(ret) <= 1) { maybe_cpus++; }
258
259 }
260
261 SAYF(cGRA "\n>>> ");
262
263 if (idle_cpus) {
264
265 if (maybe_cpus == idle_cpus) {
266
267 SAYF(cLGN "PASS: " cRST "You can run more processes on %u core%s.",
268 idle_cpus, idle_cpus > 1 ? "s" : "");
269
270 } else {
271
272 SAYF(cLGN "PASS: " cRST "You can run more processes on %u to %u core%s.",
273 idle_cpus, maybe_cpus, maybe_cpus > 1 ? "s" : "");
274
275 }
276
277 SAYF(cGRA " <<<" cRST "\n\n");
278 return 0;
279
280 }
281
282 if (maybe_cpus) {
283
284 SAYF(cYEL "CAUTION: " cRST "You may still have %u core%s available.",
285 maybe_cpus, maybe_cpus > 1 ? "s" : "");
286 SAYF(cGRA " <<<" cRST "\n\n");
287 return 1;
288
289 }
290
291 SAYF(cLRD "FAIL: " cRST "All cores are overbooked.");
292 SAYF(cGRA " <<<" cRST "\n\n");
293 return 2;
294
295 #else
296
297 u32 util_perc;
298
299 SAYF(cCYA "afl-gotcpu" VERSION cRST " by Michal Zalewski\n");
300
301 /* Run a busy loop for CTEST_TARGET_MS. */
302
303 ACTF("Measuring gross preemption rate (this will take %0.02f sec)...",
304 ((double)CTEST_TARGET_MS) / 1000);
305
306 util_perc = measure_preemption(CTEST_TARGET_MS);
307
308 /* Deliver the final verdict. */
309
310 SAYF(cGRA "\n>>> ");
311
312 if (util_perc < 105) {
313
314 SAYF(cLGN "PASS: " cRST "You can probably run additional processes.");
315
316 } else if (util_perc < 130) {
317
318 SAYF(cYEL "CAUTION: " cRST "Your CPU may be somewhat overbooked (%u%%).",
319 util_perc);
320
321 } else {
322
323 SAYF(cLRD "FAIL: " cRST "Your CPU is overbooked (%u%%).", util_perc);
324
325 }
326
327 SAYF(cGRA " <<<" cRST "\n\n");
328
329 return (util_perc > 105) + (util_perc > 130);
330
331 #endif /* ^HAVE_AFFINITY */
332
333 }
334
335