// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018 Cyril Hrubis <[email protected]>
 */

/*
 * We are testing set_mempolicy() with MPOL_BIND and MPOL_PREFERRED.
 *
 * For each node with memory we set its bit in the nodemask passed to
 * set_mempolicy() and verify that memory is then allocated from that node.
 */

#include <errno.h>
#include "config.h"
#ifdef HAVE_NUMA_V2
# include <numa.h>
# include <numaif.h>
#endif
#include "tst_test.h"
#include "tst_numa.h"

#ifdef HAVE_NUMA_V2

#include "set_mempolicy.h"

static size_t page_size;
static struct tst_nodemap *nodes;

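/* Number of pages faulted in for each verification run. */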
#define PAGES_ALLOCATED 16u

static void setup(void)
{
	page_size = getpagesize();

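	/*
	 * Consider only NUMA nodes with memory, requiring at least twice the
	 * amount we are going to fault in (the threshold is passed in kB).
	 */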
	nodes = tst_get_nodemap(TST_NUMA_MEM, 2 * PAGES_ALLOCATED * page_size / 1024);
	if (nodes->cnt <= 1)
		tst_brk(TCONF, "Test requires at least two NUMA memory nodes");

	/*
	 * In most cases set_mempolicy01 finishes quickly, but when the
	 * platform has many NUMA nodes the test matrix grows quickly and the
	 * test time increases sharply with it.
	 *
	 * Scale the maximum runtime with the number of NUMA nodes.
	 */
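	/*
	 * The scaling factor is 1 << (nodes->cnt / 16): up to 15 nodes keep
	 * the base runtime, 16..31 nodes double it, 32..47 quadruple it, etc.
	 */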
	tst_set_max_runtime(test.max_runtime * (1 << nodes->cnt/16));
}

static void cleanup(void)
{
	tst_nodemap_free(nodes);
}

static void verify_mempolicy(unsigned int node, int mode)
{
	struct bitmask *bm = numa_allocate_nodemask();
	unsigned int i;

	numa_bitmask_setbit(bm, node);

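	/*
	 * Apply the policy with only the tested node's bit set: with MPOL_BIND
	 * allocations must come from that node, with MPOL_PREFERRED they
	 * should, as long as the node has free memory.
	 */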
	TEST(set_mempolicy(mode, bm->maskp, bm->size+1));

	if (TST_RET) {
		tst_res(TFAIL | TTERRNO,
			"set_mempolicy(%s) node %u",
			tst_mempolicy_mode_name(mode), node);
		numa_free_nodemask(bm);
		return;
	}

	tst_res(TPASS, "set_mempolicy(%s) node %u",
		tst_mempolicy_mode_name(mode), node);

	numa_free_nodemask(bm);

	const char *prefix = "child: ";

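	/*
	 * Run the check both in the parent and in a forked child; the memory
	 * policy set above is inherited across fork().
	 */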
	if (SAFE_FORK()) {
		prefix = "parent: ";
		tst_reap_children();
	}

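	/*
	 * Fault in PAGES_ALLOCATED pages and count, per node, where the
	 * kernel placed them.
	 */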
	tst_nodemap_reset_counters(nodes);
	alloc_fault_count(nodes, NULL, PAGES_ALLOCATED * page_size);
	tst_nodemap_print_counters(nodes);

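	/*
	 * All pages are expected on the selected node; any page that ended up
	 * on a different node is a failure.
	 */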
	for (i = 0; i < nodes->cnt; i++) {
		if (nodes->map[i] == node) {
			if (nodes->counters[i] == PAGES_ALLOCATED) {
				tst_res(TPASS, "%sNode %u allocated %u",
					prefix, node, PAGES_ALLOCATED);
			} else {
				tst_res(TFAIL, "%sNode %u allocated %u, expected %u",
					prefix, node, nodes->counters[i],
					PAGES_ALLOCATED);
			}
			continue;
		}

		if (nodes->counters[i]) {
			tst_res(TFAIL, "%sNode %u allocated %u, expected 0",
				prefix, nodes->map[i], nodes->counters[i]);
		}
	}
}

static void verify_set_mempolicy(unsigned int n)
{
	unsigned int i;
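	/* Iteration 0 exercises MPOL_BIND, iteration 1 MPOL_PREFERRED. */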
	int mode = n ? MPOL_PREFERRED : MPOL_BIND;

	for (i = 0; i < nodes->cnt; i++)
		verify_mempolicy(nodes->map[i], mode);
}

static struct tst_test test = {
	.setup = setup,
	.cleanup = cleanup,
	.test = verify_set_mempolicy,
	.tcnt = 2,
	.forks_child = 1,
	.needs_checkpoints = 1,
	.max_runtime = 600,
};

#else

TST_TEST_TCONF(NUMA_ERROR_MSG);

#endif /* HAVE_NUMA_V2 */