1 // SPDX-License-Identifier: GPL-2.0
2 #include "tests.h"
3 #include <stdio.h>
4 #include "cpumap.h"
5 #include "event.h"
6 #include "util/synthetic-events.h"
7 #include <string.h>
8 #include <linux/bitops.h>
9 #include <internal/cpumap.h>
10 #include "debug.h"
11
12 struct machine;
13
static int process_event_mask(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	/* Verify a synthesized cpu_map event that uses the bitmask encoding. */
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct perf_cpu_map *map;
	unsigned int word_size;
	int cpu;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);

	word_size = data->mask32_data.long_size;
	TEST_ASSERT_VAL("wrong long_size", word_size == 4 || word_size == 8);

	TEST_ASSERT_VAL("wrong nr", data->mask32_data.nr == 1);

	/* The synthesized map was "0,2-20": bit 1 must be clear, all others set. */
	TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(0, data));
	TEST_ASSERT_VAL("wrong cpu", !perf_record_cpu_map_data__test_bit(1, data));
	for (cpu = 2; cpu <= 20; cpu++)
		TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(cpu, data));

	/* Round-trip the event payload back into a perf_cpu_map. */
	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 20);

	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 0);
	for (cpu = 2; cpu <= 20; cpu++)
		TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, cpu - 1).cpu == cpu);

	perf_cpu_map__put(map);
	return 0;
}
49
static int process_event_cpus(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	/* Verify a synthesized cpu_map event that lists CPUs explicitly. */
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct perf_cpu_map *map;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);

	/* The synthesized map was "1,256": exactly those two entries. */
	TEST_ASSERT_VAL("wrong nr", data->cpus_data.nr == 2);
	TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[0] == 1);
	TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[1] == 256);

	/* Round-trip the event payload back into a perf_cpu_map. */
	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 2);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 1).cpu == 256);
	/* A freshly created map must hold the only reference. */
	TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
	perf_cpu_map__put(map);
	return 0;
}
75
static int process_event_range_cpus(const struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample __maybe_unused,
				    struct machine *machine __maybe_unused)
{
	/* Verify a synthesized cpu_map event that uses the range encoding. */
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct perf_cpu_map *map;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__RANGE_CPUS);

	/* The synthesized map was "1-256": no any-CPU flag, bounds 1..256. */
	TEST_ASSERT_VAL("wrong any_cpu", data->range_cpu_data.any_cpu == 0);
	TEST_ASSERT_VAL("wrong start_cpu", data->range_cpu_data.start_cpu == 1);
	TEST_ASSERT_VAL("wrong end_cpu", data->range_cpu_data.end_cpu == 256);

	/* Round-trip the event payload back into a perf_cpu_map. */
	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 256);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__max(map).cpu == 256);
	/* A freshly created map must hold the only reference. */
	TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
	perf_cpu_map__put(map);
	return 0;
}
101
102
/*
 * Synthesize cpu_map events from maps that exercise each of the three
 * on-the-wire encodings (mask, explicit CPU list, range) and verify each
 * resulting event in its dedicated callback.
 */
static int test__cpu_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *cpus;

	/* "0,2-20" is most compact as a bitmask. */
	cpus = perf_cpu_map__new("0,2-20");
	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
	perf_cpu_map__put(cpus);

	/* "1,256" is most compact as explicit CPU values. */
	cpus = perf_cpu_map__new("1,256");
	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
	perf_cpu_map__put(cpus);

	/* "1-256" is most compact as a range. */
	cpus = perf_cpu_map__new("1-256");
	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_range_cpus, NULL));
	perf_cpu_map__put(cpus);
	return 0;
}
132
/*
 * Parse 'str' into a CPU map, render it back to text and report whether the
 * round-trip reproduces the input. Returns 1 on match, 0 on mismatch and
 * -1 when the string cannot be parsed at all.
 */
static int cpu_map_print(const char *str)
{
	char rendered[100];
	struct perf_cpu_map *map = perf_cpu_map__new(str);
	int matches;

	if (map == NULL)
		return -1;

	cpu_map__snprint(map, rendered, sizeof(rendered));
	perf_cpu_map__put(map);

	matches = strcmp(rendered, str) == 0;
	return matches;
}
146
/* Check that a variety of CPU list strings survive a parse/print round-trip. */
static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	static const char * const cpu_lists[] = {
		"1",
		"1,5",
		"1,3,5,7,9,11,13,15,17,19,21-40",
		"2-5",
		"1,3-6,8-10,24,35-37",
		"1,3-6,8-10,24,35-37",
		"1-10,12-20,22-30,32-40",
	};

	for (size_t i = 0; i < ARRAY_SIZE(cpu_lists); i++)
		TEST_ASSERT_VAL("failed to convert map", cpu_map_print(cpu_lists[i]));
	return 0;
}
158
/*
 * Merge the map parsed from 'rhs' into the one parsed from 'lhs', then check
 * the merged size against 'nr' and its textual rendering against 'expected'.
 */
static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
{
	struct perf_cpu_map *a = perf_cpu_map__new(lhs);
	struct perf_cpu_map *b = perf_cpu_map__new(rhs);
	char rendered[100];

	perf_cpu_map__merge(&a, b);
	TEST_ASSERT_VAL("failed to merge map: bad nr", perf_cpu_map__nr(a) == nr);
	cpu_map__snprint(a, rendered, sizeof(rendered));
	TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(rendered, expected));
	perf_cpu_map__put(b);

	/*
	 * When 'b' is a superset of 'a', the merge leaves 'a' pointing at the
	 * same map as 'b'. 'b' dropped its reference above while 'a' kept its
	 * own, so exactly one reference must remain here.
	 */
	TEST_ASSERT_VAL("unexpected refcnt: bad result",
			refcount_read(perf_cpu_map__refcnt(a)) == 1);

	perf_cpu_map__put(a);
	return 0;
}
182
/* Run a table of merge cases covering overlap, subset, superset and identity. */
static int test__cpu_map_merge(struct test_suite *test __maybe_unused,
			       int subtest __maybe_unused)
{
	static const struct {
		const char *lhs;
		const char *rhs;
		int nr;
		const char *expected;
	} cases[] = {
		{ "4,2,1",	"4,5,7",	5,	"1-2,4-5,7" },
		{ "1-8",	"6-9",		9,	"1-9" },
		{ "1-8,12-20",	"6-9,15",	18,	"1-9,12-20" },
		{ "4,2,1",	"1",		3,	"1-2,4" },
		{ "1",		"4,2,1",	3,	"1-2,4" },
		{ "1",		"1",		1,	"1" },
	};

	for (size_t i = 0; i < ARRAY_SIZE(cases); i++) {
		int ret = __test__cpu_map_merge(cases[i].lhs, cases[i].rhs,
						cases[i].nr, cases[i].expected);

		if (ret)
			return ret;
	}
	return 0;
}
206
/*
 * Intersect the maps parsed from 'lhs' and 'rhs', then check the result's
 * size against 'nr' and its textual rendering against 'expected'.
 */
static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
{
	struct perf_cpu_map *map_a = perf_cpu_map__new(lhs);
	struct perf_cpu_map *map_b = perf_cpu_map__new(rhs);
	struct perf_cpu_map *result = perf_cpu_map__intersect(map_a, map_b);
	char rendered[100];

	TEST_ASSERT_EQUAL("failed to intersect map: bad nr", perf_cpu_map__nr(result), nr);
	cpu_map__snprint(result, rendered, sizeof(rendered));
	TEST_ASSERT_VAL("failed to intersect map: bad result", !strcmp(rendered, expected));
	perf_cpu_map__put(map_a);
	perf_cpu_map__put(map_b);
	perf_cpu_map__put(result);
	return 0;
}
222
/* Run a table of intersect cases covering overlap, subset and identity. */
static int test__cpu_map_intersect(struct test_suite *test __maybe_unused,
				   int subtest __maybe_unused)
{
	static const struct {
		const char *lhs;
		const char *rhs;
		int nr;
		const char *expected;
	} cases[] = {
		{ "4,2,1",	"4,5,7",	1,	"4" },
		{ "1-8",	"6-9",		3,	"6-8" },
		{ "1-8,12-20",	"6-9,15",	4,	"6-8,15" },
		{ "4,2,1",	"1",		1,	"1" },
		{ "1",		"4,2,1",	1,	"1" },
		{ "1",		"1",		1,	"1" },
	};

	for (size_t i = 0; i < ARRAY_SIZE(cases); i++) {
		int ret = __test__cpu_map_intersect(cases[i].lhs, cases[i].rhs,
						    cases[i].nr, cases[i].expected);

		if (ret)
			return ret;
	}
	return 0;
}
246
/*
 * Check perf_cpu_map__equal() across identical, distinct, merged and
 * intersected maps, including the "any CPU" map and an empty map.
 */
static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
	struct perf_cpu_map *one = perf_cpu_map__new("1");
	struct perf_cpu_map *two = perf_cpu_map__new("2");
	/* "1" and "2" are disjoint, so this intersection yields an empty map. */
	struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two);
	struct perf_cpu_map *pair = perf_cpu_map__new("1-2");
	struct perf_cpu_map *tmp;
	/*
	 * Pointers to the map pointers, not the maps themselves: 'two' is
	 * reassigned by perf_cpu_map__merge() below, and the final cleanup
	 * loop must put whatever each variable points at by then.
	 */
	struct perf_cpu_map **maps[] = {&empty, &any, &one, &two, &pair};

	for (size_t i = 0; i < ARRAY_SIZE(maps); i++) {
		/* Maps equal themself. */
		TEST_ASSERT_VAL("equal", perf_cpu_map__equal(*maps[i], *maps[i]));
		for (size_t j = 0; j < ARRAY_SIZE(maps); j++) {
			/* Maps don't equal each other. */
			if (i == j)
				continue;
			TEST_ASSERT_VAL("not equal", !perf_cpu_map__equal(*maps[i], *maps[j]));
		}
	}

	/* Maps equal made maps. */
	perf_cpu_map__merge(&two, one);
	TEST_ASSERT_VAL("pair", perf_cpu_map__equal(pair, two));

	tmp = perf_cpu_map__intersect(pair, one);
	TEST_ASSERT_VAL("one", perf_cpu_map__equal(one, tmp));
	perf_cpu_map__put(tmp);

	for (size_t i = 0; i < ARRAY_SIZE(maps); i++)
		perf_cpu_map__put(*maps[i]);

	return TEST_OK;
}
281
/* Sub-test table for the CPU map suite; terminated by a NULL-named entry. */
static struct test_case tests__cpu_map[] = {
	TEST_CASE("Synthesize cpu map", cpu_map_synthesize),
	TEST_CASE("Print cpu map", cpu_map_print),
	TEST_CASE("Merge cpu map", cpu_map_merge),
	TEST_CASE("Intersect cpu map", cpu_map_intersect),
	TEST_CASE("Equal cpu map", cpu_map_equal),
	{ .name = NULL, }
};
290
/* Suite descriptor picked up by the perf test harness. */
struct test_suite suite__cpu_map = {
	.desc = "CPU map",
	.test_cases = tests__cpu_map,
};
295