// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2005-2006 David Gibson & Adam Litke, IBM Corporation.
 * Author: David Gibson & Adam Litke
 */

/*\
 * [Description]
 *
 * Some kernel versions after hugepage demand allocation was added used a
 * dubious heuristic to check if there was enough hugepage space available
 * for a given mapping. The number of not-already-instantiated pages in
 * the mapping was compared against the total hugepage free pool. It was
 * very easy to confuse this heuristic into overcommitting by allocating
 * hugepage memory in chunks, each less than the total available pool size
 * but together more than available. This would generally lead to OOM
 * SIGKILLs of one process or another when it tried to instantiate pages
 * beyond the available pool.
 */
20
21 #define _GNU_SOURCE
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <sys/mount.h>
25 #include <limits.h>
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <signal.h>
30
31 #include "hugetlb.h"
32
33 #define MNTPOINT "hugetlbfs/"
34 #define WITH_OVERCOMMIT 0
35 #define WITHOUT_OVERCOMMIT 1
36
37 static long hpage_size;
38 static int huge_fd = -1;
39
test_chunk_overcommit(void)40 static void test_chunk_overcommit(void)
41 {
42 unsigned long totpages, chunk1, chunk2;
43 void *p, *q;
44 pid_t child;
45 int status;
46
47 totpages = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
48
49 chunk1 = (totpages / 2) + 1;
50 chunk2 = totpages - chunk1 + 1;
51
52 tst_res(TINFO, "Free: %ld hugepages available: "
53 "chunk1=%ld chunk2=%ld", totpages, chunk1, chunk2);
54
55 p = SAFE_MMAP(NULL, chunk1*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED,
56 huge_fd, 0);
57
58 q = mmap(NULL, chunk2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED,
59 huge_fd, chunk1*hpage_size);
60 if (q == MAP_FAILED) {
61 if (errno != ENOMEM) {
62 tst_res(TFAIL | TERRNO, "mmap() chunk2");
63 goto cleanup1;
64 } else {
65 tst_res(TPASS, "Successful without overcommit pages");
66 goto cleanup1;
67 }
68 }
69
70 tst_res(TINFO, "Looks like we've overcommitted, testing...");
71 /* Looks like we're overcommited, but we need to confirm that
72 * this is bad. We touch it all in a child process because an
73 * overcommit will generally lead to a SIGKILL which we can't
74 * handle, of course.
75 */
76 child = SAFE_FORK();
77
78 if (child == 0) {
79 memset(p, 0, chunk1*hpage_size);
80 memset(q, 0, chunk2*hpage_size);
81 exit(0);
82 }
83
84 SAFE_WAITPID(child, &status, 0);
85
86 if (WIFSIGNALED(status)) {
87 tst_res(TFAIL, "Killed by signal '%s' due to overcommit",
88 tst_strsig(WTERMSIG(status)));
89 goto cleanup2;
90 }
91
92 tst_res(TPASS, "Successful with overcommit pages");
93
94 cleanup2:
95 SAFE_MUNMAP(q, chunk2*hpage_size);
96
97 cleanup1:
98 SAFE_MUNMAP(p, chunk1*hpage_size);
99 SAFE_FTRUNCATE(huge_fd, 0);
100 }
101
run_test(unsigned int test_type)102 static void run_test(unsigned int test_type)
103 {
104 switch (test_type) {
105 case WITHOUT_OVERCOMMIT:
106 tst_res(TINFO, "Without overcommit testing...");
107 SAFE_FILE_PRINTF(PATH_OC_HPAGES, "%d", 0);
108 break;
109 case WITH_OVERCOMMIT:
110 tst_res(TINFO, "With overcommit testing...");
111 SAFE_FILE_PRINTF(PATH_OC_HPAGES, "%d", 2);
112 break;
113 }
114 test_chunk_overcommit();
115 }
116
setup(void)117 static void setup(void)
118 {
119 hpage_size = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SIZE)*1024;
120 huge_fd = tst_creat_unlinked(MNTPOINT, 0);
121 }
122
cleanup(void)123 static void cleanup(void)
124 {
125 SAFE_CLOSE(huge_fd);
126 }
127
/* LTP test description: two iterations (with/without overcommit pages). */
static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.forks_child = 1,
	/* Save/restore nr_overcommit_hugepages; TCONF if the knob is absent. */
	.save_restore = (const struct tst_path_val[]) {
		{PATH_OC_HPAGES, NULL, TST_SR_TCONF},
		{}
	},
	/* run_test() is invoked with test_type 0 and 1 (see the two modes). */
	.tcnt = 2,
	.setup = setup,
	.cleanup = cleanup,
	.test = run_test,
	/* Reserve 3 hugepages for the test to work with. */
	.hugepages = {3, TST_NEEDS},
};
143