xref: /aosp_15_r20/external/igt-gpu-tools/tests/i915/i915_query.c (revision d83cc019efdc2edc6c4b16e9034a3ceb8d35d77c)
1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "igt.h"
25 
26 #include <limits.h>
27 
28 IGT_TEST_DESCRIPTION("Testing the i915 query uAPI.");
29 
30 /*
31  * We should get at least 3 bytes of data, one byte each for the slice,
32  * subslice & EU masks.
33  */
34 #define MIN_TOPOLOGY_ITEM_SIZE (sizeof(struct drm_i915_query_topology_info) + 3)
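
/*
 * The topology blob returned in data[] is laid out as decoded by the
 * *_available() helpers further down: the slice mask starts at offset 0,
 * the per-slice subslice masks start at subslice_offset (subslice_stride
 * bytes per slice) and the per-subslice EU masks start at eu_offset
 * (eu_stride bytes per slice/subslice pair).
 */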
35 
36 static int
37 __i915_query(int fd, struct drm_i915_query *q)
38 {
39 	if (igt_ioctl(fd, DRM_IOCTL_I915_QUERY, q))
40 		return -errno;
41 	return 0;
42 }
43 
44 static int
45 __i915_query_items(int fd, struct drm_i915_query_item *items, uint32_t n_items)
46 {
47 	struct drm_i915_query q = {
48 		.num_items = n_items,
49 		.items_ptr = to_user_pointer(items),
50 	};
51 	return __i915_query(fd, &q);
52 }
53 
54 #define i915_query_items(fd, items, n_items) do { \
55 		igt_assert_eq(__i915_query_items(fd, items, n_items), 0); \
56 		errno = 0; \
57 	} while (0)
58 #define i915_query_items_err(fd, items, n_items, err) do { \
59 		igt_assert_eq(__i915_query_items(fd, items, n_items), -err); \
60 	} while (0)
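
/*
 * The calling convention used throughout this test is a two-step query:
 * a first call with length = 0 makes the kernel report the required
 * buffer size back in item.length, a second call with data_ptr pointing
 * at an allocation of that size fills in the data. Per-item errors are
 * returned as a negative errno in item.length. A minimal sketch (buf is
 * just an illustrative local):
 *
 *   struct drm_i915_query_item item = {
 *           .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *   };
 *   i915_query_items(fd, &item, 1);      // item.length = required size
 *   void *buf = calloc(1, item.length);
 *   item.data_ptr = to_user_pointer(buf);
 *   i915_query_items(fd, &item, 1);      // buf now holds the data
 */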
61 
62 static bool has_query_supports(int fd)
63 {
64 	struct drm_i915_query query = {};
65 
66 	return __i915_query(fd, &query) == 0;
67 }
68 
69 static void test_query_garbage(int fd)
70 {
71 	struct drm_i915_query query;
72 	struct drm_i915_query_item item;
73 
74 	/* Verify that invalid query pointers are rejected. */
75 	igt_assert_eq(__i915_query(fd, NULL), -EFAULT);
76 	igt_assert_eq(__i915_query(fd, (void *) -1), -EFAULT);
77 
78 	/*
79 	 * The query flags field is currently only valid when equal to 0. This might
80 	 * change in the future.
81 	 */
82 	memset(&query, 0, sizeof(query));
83 	query.flags = 42;
84 	igt_assert_eq(__i915_query(fd, &query), -EINVAL);
85 
86 	/* Test a couple of invalid pointers. */
87 	i915_query_items_err(fd, (void *) ULONG_MAX, 1, EFAULT);
88 	i915_query_items_err(fd, (void *) 0, 1, EFAULT);
89 
90 	/* Test the invalid query id = 0. */
91 	memset(&item, 0, sizeof(item));
92 	i915_query_items_err(fd, &item, 1, EINVAL);
93 }
94 
95 static void test_query_garbage_items(int fd)
96 {
97 	struct drm_i915_query_item items[2];
98 	struct drm_i915_query_item *items_ptr;
99 	int i, n_items;
100 
101 	/*
102 	 * The query item flags field is currently only valid when equal to 0.
103 	 * Subject to change in the future.
104 	 */
105 	memset(items, 0, sizeof(items));
106 	items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
107 	items[0].flags = 42;
108 	i915_query_items(fd, items, 1);
109 	igt_assert_eq(items[0].length, -EINVAL);
110 
111 	/*
112 	 * Test an invalid query id in the second item and verify that the first
113 	 * one is properly processed.
114 	 */
115 	memset(items, 0, sizeof(items));
116 	items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
117 	items[1].query_id = ULONG_MAX;
118 	i915_query_items(fd, items, 2);
119 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
120 	igt_assert_eq(items[1].length, -EINVAL);
121 
122 	/*
123 	 * Test an invalid query id in the first item and verify that the second
124 	 * one is properly processed (the driver is expected to go through them
125 	 * all and place error codes in the failed items).
126 	 */
127 	memset(items, 0, sizeof(items));
128 	items[0].query_id = ULONG_MAX;
129 	items[1].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
130 	i915_query_items(fd, items, 2);
131 	igt_assert_eq(items[0].length, -EINVAL);
132 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[1].length);
133 
134 	/* Test a couple of invalid data pointers in the query item. */
135 	memset(items, 0, sizeof(items));
136 	items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
137 	i915_query_items(fd, items, 1);
138 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
139 
140 	items[0].data_ptr = 0;
141 	i915_query_items(fd, items, 1);
142 	igt_assert_eq(items[0].length, -EFAULT);
143 
144 	items[0].data_ptr = ULONG_MAX;
145 	i915_query_items(fd, items, 1);
146 	igt_assert_eq(items[0].length, -EFAULT);
147 
148 
149 	/* Test an invalid query item length. */
150 	memset(items, 0, sizeof(items));
151 	items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
152 	items[1].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
153 	items[1].length = sizeof(struct drm_i915_query_topology_info) - 1;
154 	i915_query_items(fd, items, 2);
155 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
156 	igt_assert_eq(items[1].length, -EINVAL);
157 
158 	/*
159 	 * Map memory for a query item in which the kernel is going to write the
160 	 * length of the item in the first ioctl(). Then unmap that memory and
161 	 * verify that the kernel correctly returns EFAULT, as the memory backing
162 	 * the item has been removed from our address space.
163 	 */
164 	items_ptr = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
165 	items_ptr[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
166 	i915_query_items(fd, items_ptr, 1);
167 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items_ptr[0].length);
168 	munmap(items_ptr, 4096);
169 	i915_query_items_err(fd, items_ptr, 1, EFAULT);
170 
171 	/*
172 	 * Map memory for a query item, then make it read only and verify that
173 	 * the kernel errors out with EFAULT.
174 	 */
175 	items_ptr = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
176 	items_ptr[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
177 	igt_assert_eq(0, mprotect(items_ptr, 4096, PROT_READ));
178 	i915_query_items_err(fd, items_ptr, 1, EFAULT);
179 	munmap(items_ptr, 4096);
180 
181 	/*
182 	 * Allocate 2 pages, prepare those 2 pages with valid query items, then
183 	 * switch the second page to read only and expect an EFAULT error.
184 	 */
185 	items_ptr = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
186 	memset(items_ptr, 0, 8192);
187 	n_items = 8192 / sizeof(struct drm_i915_query_item);
188 	for (i = 0; i < n_items; i++)
189 		items_ptr[i].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
190 	mprotect(((uint8_t *)items_ptr) + 4096, 4096, PROT_READ);
191 	i915_query_items_err(fd, items_ptr, n_items, EFAULT);
192 	munmap(items_ptr, 8192);
193 }
194 
195 /*
196  * Allocate extra memory on both sides of where the kernel is going to write
197  * and verify that it writes only where it's supposed to.
198  */
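/*
 * Layout of the buffer prepared below: a single guard byte
 * (sizeof(*_topo_info), _topo_info being a uint8_t pointer) filled with
 * 0xff on each side of the region handed to the kernel:
 *
 *   [ 0xff ][ topo_info, item.length bytes ][ 0xff ]
 */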
199 static void test_query_topology_kernel_writes(int fd)
200 {
201 	struct drm_i915_query_item item;
202 	struct drm_i915_query_topology_info *topo_info;
203 	uint8_t *_topo_info;
204 	int b, total_size;
205 
206 	memset(&item, 0, sizeof(item));
207 	item.query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
208 	i915_query_items(fd, &item, 1);
209 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, item.length);
210 
211 	total_size = item.length + 2 * sizeof(*_topo_info);
212 	_topo_info = malloc(total_size);
213 	memset(_topo_info, 0xff, total_size);
214 	topo_info = (struct drm_i915_query_topology_info *) (_topo_info + sizeof(*_topo_info));
215 	memset(topo_info, 0, item.length);
216 
217 	item.data_ptr = to_user_pointer(topo_info);
218 	i915_query_items(fd, &item, 1);
219 
220 	for (b = 0; b < sizeof(*_topo_info); b++) {
221 		igt_assert_eq(_topo_info[b], 0xff);
222 		igt_assert_eq(_topo_info[sizeof(*_topo_info) + item.length + b], 0xff);
223 	}
224 }
225 
226 static bool query_topology_supported(int fd)
227 {
228 	struct drm_i915_query_item item = {
229 		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
230 	};
231 
232 	return __i915_query_items(fd, &item, 1) == 0 && item.length > 0;
233 }
234 
235 static void test_query_topology_unsupported(int fd)
236 {
237 	struct drm_i915_query_item item = {
238 		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
239 	};
240 
241 	i915_query_items(fd, &item, 1);
242 	igt_assert_eq(item.length, -ENODEV);
243 }
244 
245 static bool
246 slice_available(const struct drm_i915_query_topology_info *topo_info,
247 		int s)
248 {
249 	return (topo_info->data[s / 8] >> (s % 8)) & 1;
250 }
251 
252 static bool
253 subslice_available(const struct drm_i915_query_topology_info *topo_info,
254 		   int s, int ss)
255 {
256 	return (topo_info->data[topo_info->subslice_offset +
257 				s * topo_info->subslice_stride +
258 				ss / 8] >> (ss % 8)) & 1;
259 }
260 
261 static bool
262 eu_available(const struct drm_i915_query_topology_info *topo_info,
263 	     int s, int ss, int eu)
264 {
265 	return (topo_info->data[topo_info->eu_offset +
266 				(s * topo_info->max_subslices + ss) * topo_info->eu_stride +
267 				eu / 8] >> (eu % 8)) & 1;
268 }
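
/*
 * Worked example of the decoding above for a hypothetical part with
 * max_slices = 2, max_subslices = 3 and subslice_stride = eu_stride = 1
 * (the real values come from the query itself):
 *
 *   slice 1 available          -> data[0], bit 1
 *   subslice 2 of slice 1      -> data[subslice_offset + 1 * 1], bit 2
 *   EU 4 of slice 1/subslice 2 -> data[eu_offset + (1 * 3 + 2) * 1], bit 4
 */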
269 
270 /*
271  * Verify that we get coherent values between the legacy getparam slice/subslice
272  * masks and the new topology query.
273  */
274 static void
275 test_query_topology_coherent_slice_mask(int fd)
276 {
277 	struct drm_i915_query_item item;
278 	struct drm_i915_query_topology_info *topo_info;
279 	drm_i915_getparam_t gp;
280 	int slice_mask, subslice_mask;
281 	int s, topology_slices, topology_subslices_slice0;
282 	int32_t first_query_length;
283 
284 	gp.param = I915_PARAM_SLICE_MASK;
285 	gp.value = &slice_mask;
286 	igt_skip_on(igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0);
287 
288 	gp.param = I915_PARAM_SUBSLICE_MASK;
289 	gp.value = &subslice_mask;
290 	igt_skip_on(igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0);
291 
292 	/* Slices */
293 	memset(&item, 0, sizeof(item));
294 	item.query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
295 	i915_query_items(fd, &item, 1);
296 	/* We expect at least one byte for each of the slice, subslice & EU masks. */
297 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, item.length);
298 	first_query_length = item.length;
299 
300 	topo_info = calloc(1, item.length);
301 
302 	item.data_ptr = to_user_pointer(topo_info);
303 	i915_query_items(fd, &item, 1);
304 	/* We should get the same size once the data has been written. */
305 	igt_assert_eq(first_query_length, item.length);
306 	/* We expect at least one byte for each of the slice, subslice & EU masks. */
307 	igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, item.length);
308 
309 	topology_slices = 0;
310 	for (s = 0; s < topo_info->max_slices; s++) {
311 		if (slice_available(topo_info, s))
312 			topology_slices |= 1UL << s;
313 	}
314 
315 	igt_debug("slice mask getparam=0x%x / query=0x%x\n",
316 		  slice_mask, topology_slices);
317 
318 	/* These 2 should always match. */
319 	igt_assert_eq(slice_mask, topology_slices);
320 
321 	topology_subslices_slice0 = 0;
322 	for (s = 0; s < topo_info->max_subslices; s++) {
323 		if (subslice_available(topo_info, 0, s))
324 			topology_subslices_slice0 |= 1UL << s;
325 	}
326 
327 	igt_debug("subslice mask getparam=0x%x / query=0x%x\n",
328 		  subslice_mask, topology_subslices_slice0);
329 
330 	/*
331 	 * I915_PARAM_SUBSLICE_MASK returns the value for slice0; it should
332 	 * match the value for the first slice of the topology.
333 	 */
334 	igt_assert_eq(subslice_mask, topology_subslices_slice0);
335 
336 	free(topo_info);
337 }
338 
339 /*
340  * Verify that we get the same total number of EUs from getparam and the topology query.
341  */
342 static void
343 test_query_topology_matches_eu_total(int fd)
344 {
345 	struct drm_i915_query_item item;
346 	struct drm_i915_query_topology_info *topo_info;
347 	drm_i915_getparam_t gp;
348 	int n_eus, n_eus_topology, s;
349 
350 	gp.param = I915_PARAM_EU_TOTAL;
351 	gp.value = &n_eus;
352 	do_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
353 	igt_debug("n_eus=%i\n", n_eus);
354 
355 	memset(&item, 0, sizeof(item));
356 	item.query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
357 	i915_query_items(fd, &item, 1);
358 
359 	topo_info = calloc(1, item.length);
360 
361 	item.data_ptr = to_user_pointer(topo_info);
362 	i915_query_items(fd, &item, 1);
363 
364 	igt_debug("max_slices=%hu max_subslices=%hu max_eus_per_subslice=%hu\n",
365 		  topo_info->max_slices, topo_info->max_subslices,
366 		  topo_info->max_eus_per_subslice);
367 	igt_debug(" subslice_offset=%hu subslice_stride=%hu\n",
368 		  topo_info->subslice_offset, topo_info->subslice_stride);
369 	igt_debug(" eu_offset=%hu eu_stride=%hu\n",
370 		  topo_info->eu_offset, topo_info->eu_stride);
371 
372 	n_eus_topology = 0;
373 	for (s = 0; s < topo_info->max_slices; s++) {
374 		int ss;
375 
376 		igt_debug("slice%i: (%s)\n", s,
377 			  slice_available(topo_info, s) ? "available" : "fused");
378 
379 		if (!slice_available(topo_info, s))
380 			continue;
381 
382 		for (ss = 0; ss < topo_info->max_subslices; ss++) {
383 			int eu, n_subslice_eus = 0;
384 
385 			igt_debug("\tsubslice%i: (%s)\n", ss,
386 				  subslice_available(topo_info, s, ss) ? "available" : "fused");
387 
388 			if (!subslice_available(topo_info, s, ss))
389 				continue;
390 
391 			igt_debug("\t\teu_mask: 0b");
392 			for (eu = 0; eu < topo_info->max_eus_per_subslice; eu++) {
393 				uint8_t val = eu_available(topo_info, s, ss,
394 							   topo_info->max_eus_per_subslice - 1 - eu);
395 				igt_debug("%hhi", val);
396 				n_subslice_eus += __builtin_popcount(val);
397 				n_eus_topology += __builtin_popcount(val);
398 			}
399 
400 			igt_debug(" (%i)\n", n_subslice_eus);
401 
402 			/* Sanity checks. */
403 			if (n_subslice_eus > 0) {
404 				igt_assert(slice_available(topo_info, s));
405 				igt_assert(subslice_available(topo_info, s, ss));
406 			}
407 			if (subslice_available(topo_info, s, ss)) {
408 				igt_assert(slice_available(topo_info, s));
409 			}
410 		}
411 	}
412 
413 	free(topo_info);
414 
415 	igt_assert(n_eus_topology == n_eus);
416 }
417 
418 /*
419  * Verify some numbers on Gens whose characteristics we know for sure from
420  * the PCI ids.
421  */
422 static void
423 test_query_topology_known_pci_ids(int fd, int devid)
424 {
425 	const struct intel_device_info *dev_info = intel_get_device_info(devid);
426 	struct drm_i915_query_item item;
427 	struct drm_i915_query_topology_info *topo_info;
428 	int n_slices = 0, n_subslices = 0;
429 	int s, ss;
430 
431 	/* The GT size on some Broadwell SKUs is not defined, skip those. */
432 	igt_skip_on(dev_info->gt == 0);
433 
434 	memset(&item, 0, sizeof(item));
435 	item.query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
436 	i915_query_items(fd, &item, 1);
437 
438 	topo_info = (struct drm_i915_query_topology_info *) calloc(1, item.length);
439 
440 	item.data_ptr = to_user_pointer(topo_info);
441 	i915_query_items(fd, &item, 1);
442 
443 	for (s = 0; s < topo_info->max_slices; s++) {
444 		if (slice_available(topo_info, s))
445 			n_slices++;
446 
447 		for (ss = 0; ss < topo_info->max_subslices; ss++) {
448 			if (subslice_available(topo_info, s, ss))
449 				n_subslices++;
450 		}
451 	}
452 
453 	igt_debug("Platform=%s GT=%u slices=%u subslices=%u\n",
454 		  dev_info->codename, dev_info->gt, n_slices, n_subslices);
455 
456 	switch (dev_info->gt) {
457 	case 1:
458 		igt_assert_eq(n_slices, 1);
459 		igt_assert(n_subslices == 1 || n_subslices == 2 || n_subslices == 3);
460 		break;
461 	case 2:
462 		igt_assert_eq(n_slices, 1);
463 		if (dev_info->is_haswell)
464 			igt_assert_eq(n_subslices, 2);
465 		else
466 			igt_assert_eq(n_subslices, 3);
467 		break;
468 	case 3:
469 		igt_assert_eq(n_slices, 2);
470 		if (dev_info->is_haswell)
471 			igt_assert_eq(n_subslices, 2 * 2);
472 		else
473 			igt_assert_eq(n_subslices, 2 * 3);
474 		break;
475 	case 4:
476 		igt_assert_eq(n_slices, 3);
477 		igt_assert_eq(n_subslices, 3 * 3);
478 		break;
479 	default:
480 		igt_assert(false);
481 	}
482 
483 	free(topo_info);
484 }
485 
486 static bool query_engine_info_supported(int fd)
487 {
488 	struct drm_i915_query_item item = {
489 		.query_id = DRM_I915_QUERY_ENGINE_INFO,
490 	};
491 
492 	return __i915_query_items(fd, &item, 1) == 0 && item.length > 0;
493 }
494 
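/*
 * DRM_I915_QUERY_ENGINE_INFO returns a struct drm_i915_query_engine_info
 * header (num_engines plus reserved fields) followed by num_engines
 * struct drm_i915_engine_info entries, so the required length is only
 * known at runtime. The checks below exercise the error paths: non-zero
 * flags, a too short length, a NULL data pointer, userspace-set MBZ
 * fields and inaccessible (unmapped or read-only) result memory.
 */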
495 static void engines_invalid(int fd)
496 {
497 	struct drm_i915_query_engine_info *engines;
498 	struct drm_i915_query_item item;
499 	unsigned int len;
500 
501 	/* Flags is MBZ. */
502 	memset(&item, 0, sizeof(item));
503 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
504 	item.flags = 1;
505 	i915_query_items(fd, &item, 1);
506 	igt_assert_eq(item.length, -EINVAL);
507 
508 	/* Length not zero and not greater than or equal to the required size. */
509 	memset(&item, 0, sizeof(item));
510 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
511 	item.length = 1;
512 	i915_query_items(fd, &item, 1);
513 	igt_assert_eq(item.length, -EINVAL);
514 
515 	/* Query correct length. */
516 	memset(&item, 0, sizeof(item));
517 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
518 	i915_query_items(fd, &item, 1);
519 	igt_assert(item.length >= 0);
520 	len = item.length;
521 
522 	engines = malloc(len);
523 	igt_assert(engines);
524 
525 	/* Invalid pointer. */
526 	memset(&item, 0, sizeof(item));
527 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
528 	item.length = len;
529 	i915_query_items(fd, &item, 1);
530 	igt_assert_eq(item.length, -EFAULT);
531 
532 	/* All fields in engines query are MBZ and only filled by the kernel. */
533 
534 	memset(engines, 0, len);
535 	engines->num_engines = 1;
536 	memset(&item, 0, sizeof(item));
537 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
538 	item.length = len;
539 	item.data_ptr = to_user_pointer(engines);
540 	i915_query_items(fd, &item, 1);
541 	igt_assert_eq(item.length, -EINVAL);
542 
543 	memset(engines, 0, len);
544 	engines->rsvd[0] = 1;
545 	memset(&item, 0, sizeof(item));
546 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
547 	item.length = len;
548 	item.data_ptr = to_user_pointer(engines);
549 	i915_query_items(fd, &item, 1);
550 	igt_assert_eq(item.length, -EINVAL);
551 
552 	memset(engines, 0, len);
553 	engines->rsvd[1] = 1;
554 	memset(&item, 0, sizeof(item));
555 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
556 	item.length = len;
557 	item.data_ptr = to_user_pointer(engines);
558 	i915_query_items(fd, &item, 1);
559 	igt_assert_eq(item.length, -EINVAL);
560 
561 	memset(engines, 0, len);
562 	engines->rsvd[2] = 1;
563 	memset(&item, 0, sizeof(item));
564 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
565 	item.length = len;
566 	item.data_ptr = to_user_pointer(engines);
567 	i915_query_items(fd, &item, 1);
568 	igt_assert_eq(item.length, -EINVAL);
569 
570 	free(engines);
571 
572 	igt_assert(len <= 4096);
573 	engines = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
574 		       -1, 0);
575 	igt_assert(engines != MAP_FAILED);
576 
577 	/* PROT_NONE behaves like an unmapped area. */
578 	memset(engines, 0, len);
579 	igt_assert_eq(mprotect(engines, len, PROT_NONE), 0);
580 	memset(&item, 0, sizeof(item));
581 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
582 	item.length = len;
583 	item.data_ptr = to_user_pointer(engines);
584 	i915_query_items(fd, &item, 1);
585 	igt_assert_eq(item.length, -EFAULT);
586 	igt_assert_eq(mprotect(engines, len, PROT_WRITE), 0);
587 
588 	/* Read-only, so the kernel cannot write the data back. */
589 	memset(engines, 0, len);
590 	igt_assert_eq(mprotect(engines, len, PROT_READ), 0);
591 	memset(&item, 0, sizeof(item));
592 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
593 	item.length = len;
594 	item.data_ptr = to_user_pointer(engines);
595 	i915_query_items(fd, &item, 1);
596 	igt_assert_eq(item.length, -EFAULT);
597 
598 	munmap(engines, 4096);
599 }
600 
601 static bool
602 has_engine(struct drm_i915_query_engine_info *engines,
603 	   unsigned class, unsigned instance)
604 {
605 	unsigned int i;
606 
607 	for (i = 0; i < engines->num_engines; i++) {
608 		struct drm_i915_engine_info *engine =
609 			(struct drm_i915_engine_info *)&engines->engines[i];
610 
611 		if (engine->engine.engine_class == class &&
612 		    engine->engine.engine_instance == instance)
613 			return true;
614 	}
615 
616 	return false;
617 }
618 
619 static void engines(int fd)
620 {
621 	struct drm_i915_query_engine_info *engines;
622 	struct drm_i915_query_item item;
623 	unsigned int len, i;
624 
625 	engines = malloc(4096);
626 	igt_assert(engines);
627 
628 	/* Query required buffer length. */
629 	memset(engines, 0, 4096);
630 	memset(&item, 0, sizeof(item));
631 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
632 	item.data_ptr = to_user_pointer(engines);
633 	i915_query_items(fd, &item, 1);
634 	igt_assert(item.length >= 0);
635 	igt_assert(item.length <= 4096);
636 	len = item.length;
637 
638 	/* Check that a length larger than required works and reports the same length. */
639 	memset(engines, 0, 4096);
640 	memset(&item, 0, sizeof(item));
641 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
642 	item.length = 4096;
643 	item.data_ptr = to_user_pointer(engines);
644 	i915_query_items(fd, &item, 1);
645 	igt_assert_eq(item.length, len);
646 
647 	/* Actual query. */
648 	memset(engines, 0, 4096);
649 	memset(&item, 0, sizeof(item));
650 	item.query_id = DRM_I915_QUERY_ENGINE_INFO;
651 	item.length = len;
652 	item.data_ptr = to_user_pointer(engines);
653 	i915_query_items(fd, &item, 1);
654 	igt_assert_eq(item.length, len);
655 
656 	/* Every GPU has at least one engine. */
657 	igt_assert(engines->num_engines > 0);
658 
659 	/* MBZ fields. */
660 	igt_assert_eq(engines->rsvd[0], 0);
661 	igt_assert_eq(engines->rsvd[1], 0);
662 	igt_assert_eq(engines->rsvd[2], 0);
663 
664 	/* Check results match the legacy GET_PARAM (where we can). */
665 	for (i = 0; i < engines->num_engines; i++) {
666 		struct drm_i915_engine_info *engine =
667 			(struct drm_i915_engine_info *)&engines->engines[i];
668 
669 		igt_debug("%u: class=%u instance=%u flags=%llx capabilities=%llx\n",
670 			  i,
671 			  engine->engine.engine_class,
672 			  engine->engine.engine_instance,
673 			  engine->flags,
674 			  engine->capabilities);
675 
676 		/* MBZ fields. */
677 		igt_assert_eq(engine->rsvd0, 0);
678 		igt_assert_eq(engine->rsvd1[0], 0);
679 		igt_assert_eq(engine->rsvd1[1], 0);
680 
681 		switch (engine->engine.engine_class) {
682 		case I915_ENGINE_CLASS_RENDER:
683 			/* Will be tested later. */
684 			break;
685 		case I915_ENGINE_CLASS_COPY:
686 			igt_assert(gem_has_blt(fd));
687 			break;
688 		case I915_ENGINE_CLASS_VIDEO:
689 			switch (engine->engine.engine_instance) {
690 			case 0:
691 				igt_assert(gem_has_bsd(fd));
692 				break;
693 			case 1:
694 				igt_assert(gem_has_bsd2(fd));
695 				break;
696 			}
697 			break;
698 		case I915_ENGINE_CLASS_VIDEO_ENHANCE:
699 			igt_assert(gem_has_vebox(fd));
700 			break;
701 		default:
702 			igt_assert(0);
703 		}
704 	}
705 
706 	/* Reverse check to the above - all GET_PARAM engines are present. */
707 	igt_assert(has_engine(engines, I915_ENGINE_CLASS_RENDER, 0));
708 	if (gem_has_blt(fd))
709 		igt_assert(has_engine(engines, I915_ENGINE_CLASS_COPY, 0));
710 	if (gem_has_bsd(fd))
711 		igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO, 0));
712 	if (gem_has_bsd2(fd))
713 		igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO, 1));
714 	if (gem_has_vebox(fd))
715 		igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO_ENHANCE,
716 				       0));
717 
718 	free(engines);
719 }
720 
721 igt_main
722 {
723 	int fd = -1;
724 	int devid;
725 
726 	igt_fixture {
727 		fd = drm_open_driver(DRIVER_INTEL);
728 		igt_require(has_query_supports(fd));
729 		devid = intel_get_drm_devid(fd);
730 	}
731 
732 	igt_subtest("query-garbage")
733 		test_query_garbage(fd);
734 
735 	igt_subtest("query-garbage-items") {
736 		igt_require(query_topology_supported(fd));
737 		test_query_garbage_items(fd);
738 	}
739 
740 	igt_subtest("query-topology-kernel-writes") {
741 		igt_require(query_topology_supported(fd));
742 		test_query_topology_kernel_writes(fd);
743 	}
744 
745 	igt_subtest("query-topology-unsupported") {
746 		igt_require(!query_topology_supported(fd));
747 		test_query_topology_unsupported(fd);
748 	}
749 
750 	igt_subtest("query-topology-coherent-slice-mask") {
751 		igt_require(query_topology_supported(fd));
752 		test_query_topology_coherent_slice_mask(fd);
753 	}
754 
755 	igt_subtest("query-topology-matches-eu-total") {
756 		igt_require(query_topology_supported(fd));
757 		test_query_topology_matches_eu_total(fd);
758 	}
759 
760 	igt_subtest("query-topology-known-pci-ids") {
761 		igt_require(query_topology_supported(fd));
762 		igt_require(IS_HASWELL(devid) || IS_BROADWELL(devid) ||
763 			    IS_SKYLAKE(devid) || IS_KABYLAKE(devid) ||
764 			    IS_COFFEELAKE(devid));
765 		test_query_topology_known_pci_ids(fd, devid);
766 	}
767 
768 	igt_subtest_group {
769 		igt_fixture {
770 			igt_require(query_engine_info_supported(fd));
771 		}
772 
773 		igt_subtest("engine-info-invalid")
774 			engines_invalid(fd);
775 
776 		igt_subtest("engine-info")
777 			engines(fd);
778 	}
779 
780 	igt_fixture {
781 		close(fd);
782 	}
783 }
784