/* basic set of prime tests between intel and nouveau */

/* test list -
   1. share buffer from intel -> nouveau.
   2. share buffer from nouveau -> intel
   3. share intel->nouveau, map on both, write intel, read nouveau
   4. share intel->nouveau, blit intel fill, readback on nouveau
   test 1 + map buffer, read/write, map other size.
   do some hw actions on the buffer
   some illegal operations -
	close the prime fd, then try to map

   TODO add some nouveau rendering tests
*/


#include "igt.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <errno.h>

#include "intel_bufmgr.h"
#include "nouveau.h"

static int intel_fd = -1, nouveau_fd = -1;
static drm_intel_bufmgr *bufmgr;
static struct nouveau_device *ndev;
static struct nouveau_client *nclient;
static uint32_t devid;
static struct intel_batchbuffer *batch;
static struct nouveau_object *nchannel, *pcopy;
static struct nouveau_bufctx *nbufctx;
static struct nouveau_pushbuf *npush;

static struct nouveau_bo *query_bo;
static uint32_t query_counter;
static volatile uint32_t *query;
static uint32_t memtype_intel, tile_intel_y, tile_intel_x;

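/* PCOPY methods are driven on fifo subchannel 6: SUBC_COPY(mthd) expands
 * to the two arguments "6, (mthd)" consumed by the BEGIN_* helpers below.
 */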
#define SUBC_COPY(x) 6, (x)
#define NV01_SUBCHAN_OBJECT 0

#define NV01_SUBC(subc, mthd) SUBC_##subc((NV01_SUBCHAN_##mthd))

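/* Surface geometry: the requested size (w, h) plus the tile-aligned
 * pitch and line count that nv_bo_alloc() actually allocated for it.
 */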
typedef struct {
	uint32_t w, h;
	uint32_t pitch, lines;
} rect;

static void nv_bo_alloc(struct nouveau_bo **bo, rect *r,
			uint32_t w, uint32_t h, uint32_t tile_mode,
			int handle, uint32_t dom)
{
	uint32_t size;
	uint32_t dx = 1, dy = 1, memtype = 0;

	*bo = NULL;
	if (tile_mode) {
		uint32_t tile_y;
		uint32_t tile_x;

		/* Y major tiling */
		if ((tile_mode & 0xf) == 0xe)
			/* but the internal layout is different */
			tile_x = 7;
		else
			tile_x = 6 + (tile_mode & 0xf);
		if (ndev->chipset < 0xc0) {
			memtype = 0x70;
			tile_y = 2;
		} else {
			memtype = 0xfe;
			tile_y = 3;
		}
		if ((tile_mode & 0xf) == 0xe)
			memtype = memtype_intel;
		tile_y += ((tile_mode & 0xf0)>>4);

		dx = 1 << tile_x;
		dy = 1 << tile_y;
		igt_debug("Tiling requirements: x y %u %u\n", dx, dy);
	}

	r->w = w;
	r->h = h;

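	/* Round pitch and line count up to the tile dimensions; for a
	 * power-of-two d, (v + d-1) & ~(d-1) rounds v up to the next
	 * multiple of d, e.g. (300 + 127) & ~127 = 384.
	 */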
	r->pitch = w = (w + dx-1) & ~(dx-1);
	r->lines = h = (h + dy-1) & ~(dy-1);
	size = w*h;

	if (handle < 0) {
		union nouveau_bo_config cfg;
		cfg.nv50.memtype = memtype;
		cfg.nv50.tile_mode = tile_mode;
		if (dom == NOUVEAU_BO_GART)
			dom |= NOUVEAU_BO_MAP;
		igt_assert(nouveau_bo_new(ndev, dom, 4096, size, &cfg, bo) == 0);
		igt_assert(nouveau_bo_map(*bo, NOUVEAU_BO_RDWR, nclient) == 0);

		igt_debug("new flags %08x memtype %08x tile %08x\n",
			  (*bo)->flags, (*bo)->config.nv50.memtype,
			  (*bo)->config.nv50.tile_mode);
		if (tile_mode == tile_intel_y || tile_mode == tile_intel_x) {
			igt_debug("tile mode was: %02x, now: %02x\n",
				  (*bo)->config.nv50.tile_mode, tile_mode);
			/* Doesn't like intel tiling much, so force the
			 * tile mode back to what we asked for. */
			(*bo)->config.nv50.tile_mode = tile_mode;
		}
	} else {
		igt_assert(nouveau_bo_prime_handle_ref(ndev, handle, bo) == 0);
		close(handle);
		igt_assert_f((*bo)->size >= size,
			     "expected bo size to be at least %u, "
			     "but received %"PRIu64"\n", size, (*bo)->size);
		igt_debug("prime flags %08x memtype %08x tile %08x\n",
			  (*bo)->flags, (*bo)->config.nv50.memtype,
			  (*bo)->config.nv50.tile_mode);
		(*bo)->config.nv50.memtype = memtype;
		(*bo)->config.nv50.tile_mode = tile_mode;
	}
	igt_debug("size: %"PRIu64"\n", (*bo)->size);
}

static inline void
PUSH_DATA(struct nouveau_pushbuf *push, uint32_t data)
{
	*push->cur++ = data;
}

static inline void
BEGIN_NV04(struct nouveau_pushbuf *push, int subc, int mthd, int size)
{
	PUSH_DATA (push, 0x00000000 | (size << 18) | (subc << 13) | mthd);
}

static inline void
BEGIN_NI04(struct nouveau_pushbuf *push, int subc, int mthd, int size)
{
	PUSH_DATA (push, 0x40000000 | (size << 18) | (subc << 13) | mthd);
}

static inline void
BEGIN_NVC0(struct nouveau_pushbuf *push, int subc, int mthd, int size)
{
	PUSH_DATA (push, 0x20000000 | (size << 16) | (subc << 13) | (mthd / 4));
}
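
/* A worked example of the method headers built by these helpers (an
 * informal reading, not taken from official docs): BEGIN_NV04(push, 6,
 * 0x0200, 7) packs (7 << 18) | (6 << 13) | 0x0200 = 0x001cc200, i.e.
 * emit 7 consecutive methods starting at 0x0200 on subchannel 6.
 * BEGIN_NI04 sets bit 30 for non-incrementing writes, and BEGIN_NVC0
 * packs the dword method index (mthd / 4) with the count at bit 16 and
 * sets bit 29.
 */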

static inline void
BEGIN_NVXX(struct nouveau_pushbuf *push, int subc, int mthd, int size)
{
	if (ndev->chipset < 0xc0)
		BEGIN_NV04(push, subc, mthd, size);
	else
		BEGIN_NVC0(push, subc, mthd, size);
}

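/* Submit a no-op batch that references the bo, making the intel side
 * actively use the shared buffer; presumably this exercises the
 * cross-device synchronisation paths around the PCOPY operations.
 */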
static void
noop_intel(drm_intel_bo *bo)
{
	BEGIN_BATCH(3, 1);
	OUT_BATCH(MI_NOOP);
	OUT_BATCH(MI_BATCH_BUFFER_END);
	OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER,
			I915_GEM_DOMAIN_RENDER, 0);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}

static void find_and_open_devices(void)
{
	int i;
	char path[80], *unused;
	struct stat buf;
	FILE *fl;
	char vendor_id[8] = {};
	int venid;

	for (i = 0; i < 9; i++) {
		sprintf(path, "/sys/class/drm/card%d/device/vendor", i);
		if (stat(path, &buf))
			break;

		fl = fopen(path, "r");
		if (!fl)
			break;

		unused = fgets(vendor_id, sizeof(vendor_id)-1, fl);
		(void)unused;
		fclose(fl);

		venid = strtoul(vendor_id, NULL, 16);
		sprintf(path, "/dev/dri/card%d", i);
		if (venid == 0x8086) { /* Intel */
			intel_fd = open(path, O_RDWR);
			igt_assert(intel_fd >= 0);
		} else if (venid == 0x10de) { /* NVIDIA */
			nouveau_fd = open(path, O_RDWR);
			igt_assert(nouveau_fd >= 0);
		}
	}
}

static void init_nouveau(void)
{
	struct nv04_fifo nv04_data = { .vram = 0xbeef0201,
				       .gart = 0xbeef0202 };
	struct nvc0_fifo nvc0_data = { };
	struct nouveau_fifo *fifo;
	int size;
	uint32_t class;
	void *data;

	igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) == 0);

	igt_assert(nouveau_client_new(ndev, &nclient) == 0);

	igt_skip_on_f(ndev->chipset < 0xa3 || ndev->chipset == 0xaa || ndev->chipset == 0xac,
		      "Your card doesn't support PCOPY\n");

	// TODO: Get a kepler and add support for it
	igt_skip_on_f(ndev->chipset >= 0xe0,
		      "Unsure how kepler works!\n");
	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  4096, 4096, NULL, &query_bo) == 0);
	igt_assert(nouveau_bo_map(query_bo, NOUVEAU_BO_RDWR, nclient) == 0);
	query = query_bo->map;
	*query = query_counter;

	if (ndev->chipset < 0xc0) {
		class = 0x85b5;
		data = &nv04_data;
		size = sizeof(nv04_data);
	} else {
		class = ndev->chipset < 0xe0 ? 0x490b5 : 0xa0b5;
		data = &nvc0_data;
		size = sizeof(nvc0_data);
	}

	igt_assert(nouveau_object_new(&ndev->object, 0, NOUVEAU_FIFO_CHANNEL_CLASS,
				      data, size, &nchannel) == 0);

	fifo = nchannel->data;

	igt_assert(nouveau_pushbuf_new(nclient, nchannel, 4, 32 * 1024,
				       true, &npush) == 0);

	igt_assert(nouveau_bufctx_new(nclient, 1, &nbufctx) == 0);

	npush->user_priv = nbufctx;

	/* Hope this is enough init for PCOPY */
	igt_assert(nouveau_object_new(nchannel, class, class & 0xffff, NULL, 0, &pcopy) == 0);
	igt_assert(nouveau_pushbuf_space(npush, 512, 0, 0) == 0);

	if (ndev->chipset < 0xc0) {
		struct nv04_fifo *nv04_fifo = (struct nv04_fifo*)fifo;
		tile_intel_y = 0x3e;
		tile_intel_x = 0x13;

		BEGIN_NV04(npush, NV01_SUBC(COPY, OBJECT), 1);
		PUSH_DATA(npush, pcopy->handle);
		BEGIN_NV04(npush, SUBC_COPY(0x0180), 3);
		PUSH_DATA(npush, nv04_fifo->vram);
		PUSH_DATA(npush, nv04_fifo->vram);
		PUSH_DATA(npush, nv04_fifo->vram);
	} else {
		tile_intel_y = 0x2e;
		tile_intel_x = 0x03;
		BEGIN_NVC0(npush, NV01_SUBC(COPY, OBJECT), 1);
		PUSH_DATA(npush, pcopy->handle);
	}
	nouveau_pushbuf_kick(npush, npush->channel);
}

static void fill16(void *ptr, uint32_t val)
{
	uint32_t *p = ptr;

	/* replicate the low byte of val into all 16 bytes */
	val = (val) | (val << 8) | (val << 16) | (val << 24);
	p[0] = p[1] = p[2] = p[3] = val;
}

#define TILE_SIZE 4096

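/* Software fallback for Intel Y-major tiling: a 4096-byte tile covers a
 * 128x32 region as 8 column strips of 16-byte-wide rows, so the byte at
 * (x, y) within a tile lands at ((x / 16) * 32 + y) * 16 + (x % 16),
 * which is what the out_ofs computation below implements. The bit9
 * swizzling quirk is deliberately left out (see the commented-out line).
 */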
static void swtile_y(uint8_t *out, const uint8_t *in, int w, int h)
{
	uint32_t x, y, dx, dy;
	uint8_t *endptr = out + w * h;
	igt_assert(!(w % 128));
	igt_assert(!(h % 32));

	for (y = 0; y < h; y += 32) {
		for (x = 0; x < w; x += 128, out += TILE_SIZE) {
			for (dx = 0; dx < 8; ++dx) {
				for (dy = 0; dy < 32; ++dy) {
					uint32_t out_ofs = (dx * 32 + dy) * 16;
					uint32_t in_ofs = (y + dy) * w + (x + 16 * dx);
					igt_assert(out_ofs < TILE_SIZE);
					igt_assert(in_ofs < w*h);

					// To do the Y tiling quirk:
					// out_ofs = out_ofs ^ (((out_ofs >> 9) & 1) << 6);
					memcpy(&out[out_ofs], &in[in_ofs], 16);
				}
			}
		}
	}
	igt_assert(out == endptr);
}

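/* Software fallback for Intel X-major tiling: a 4096-byte tile covers a
 * 512x8 region row by row, so row dy of a tile starts at byte 512 * dy.
 */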
static void swtile_x(uint8_t *out, const uint8_t *in, int w, int h)
{
	uint32_t x, y, dy;
	uint8_t *endptr = out + w * h;
	igt_assert(!(w % 512));
	igt_assert(!(h % 8));

	for (y = 0; y < h; y += 8) {
		for (x = 0; x < w; x += 512, out += TILE_SIZE) {
			for (dy = 0; dy < 8; ++dy) {
				uint32_t out_ofs = 512 * dy;
				uint32_t in_ofs = (y + dy) * w + x;
				igt_assert(out_ofs < TILE_SIZE);
				igt_assert(in_ofs < w*h);
				memcpy(&out[out_ofs], &in[in_ofs], 512);
			}
		}
	}
	igt_assert(out == endptr);
}

static void perform_copy(struct nouveau_bo *nvbo, const rect *dst,
			 uint32_t dst_x, uint32_t dst_y,
			 struct nouveau_bo *nvbi, const rect *src,
			 uint32_t src_x, uint32_t src_y,
			 uint32_t w, uint32_t h)
{
	struct nouveau_pushbuf_refn refs[] = {
		{ nvbi, (nvbi->flags & NOUVEAU_BO_APER) | NOUVEAU_BO_RD },
		{ nvbo, (nvbo->flags & NOUVEAU_BO_APER) | NOUVEAU_BO_WR },
		{ query_bo, NOUVEAU_BO_GART | NOUVEAU_BO_RDWR }
	};
	uint32_t cpp = 1, exec = 0x00003000; /* QUERY|QUERY_SHORT|FORMAT */
	uint32_t src_off = 0, dst_off = 0;
	struct nouveau_pushbuf *push = npush;
	int ret;

	if (nvbi->config.nv50.tile_mode == tile_intel_y)
		igt_debug("src is y-tiled\n");
	if (nvbo->config.nv50.tile_mode == tile_intel_y)
		igt_debug("dst is y-tiled\n");

	igt_assert(nouveau_pushbuf_space(push, 64, 0, 0) == 0);
	igt_assert(nouveau_pushbuf_refn(push, refs, 3) == 0);

	if (!nvbi->config.nv50.tile_mode) {
		src_off = src_y * src->pitch + src_x;
		exec |= 0x00000010; /* src is linear (no tile mode) */
	}

	if (!nvbo->config.nv50.tile_mode) {
		dst_off = dst_y * dst->pitch + dst_x;
		exec |= 0x00000100; /* dst is linear (no tile mode) */
	}

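	/* Method layout used below (names are informal, inferred from use):
	 * 0x0200+ describes the source surface (tile mode, pitch, height,
	 * depth, layer, x, y), 0x0220+ the destination, 0x030c+ the src/dst
	 * addresses, pitches, copy width/height and a component swizzle,
	 * 0x0338+ the query address and sequence value, and 0x0300 launches
	 * the copy with the accumulated exec flags.
	 */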
	BEGIN_NVXX(push, SUBC_COPY(0x0200), 7);
	PUSH_DATA (push, nvbi->config.nv50.tile_mode);
	PUSH_DATA (push, src->pitch / cpp);
	PUSH_DATA (push, src->h);
	PUSH_DATA (push, 1);
	PUSH_DATA (push, 0);
	PUSH_DATA (push, src_x / cpp);
	PUSH_DATA (push, src_y);

	BEGIN_NVXX(push, SUBC_COPY(0x0220), 7);
	PUSH_DATA (push, nvbo->config.nv50.tile_mode);
	PUSH_DATA (push, dst->pitch / cpp);
	PUSH_DATA (push, dst->h);
	PUSH_DATA (push, 1);
	PUSH_DATA (push, 0);
	PUSH_DATA (push, dst_x / cpp);
	PUSH_DATA (push, dst_y);

	BEGIN_NVXX(push, SUBC_COPY(0x030c), 9);
	PUSH_DATA (push, (nvbi->offset + src_off) >> 32);
	PUSH_DATA (push, (nvbi->offset + src_off));
	PUSH_DATA (push, (nvbo->offset + dst_off) >> 32);
	PUSH_DATA (push, (nvbo->offset + dst_off));
	PUSH_DATA (push, src->pitch);
	PUSH_DATA (push, dst->pitch);
	PUSH_DATA (push, w / cpp);
	PUSH_DATA (push, h);
	PUSH_DATA (push, 0x03333120);

	BEGIN_NVXX(push, SUBC_COPY(0x0338), 3);
	PUSH_DATA (push, (query_bo->offset) >> 32);
	PUSH_DATA (push, (query_bo->offset));
	PUSH_DATA (push, ++query_counter);

	BEGIN_NVXX(push, SUBC_COPY(0x0300), 1);
	PUSH_DATA (push, exec);

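	/* Kick the pushbuf, then spin until PCOPY writes the incremented
	 * sequence number back through the query bo, i.e. the copy retired.
	 */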
	ret = nouveau_pushbuf_kick(push, push->channel);
	while (!ret && *query < query_counter) { usleep(1000); }

	igt_assert_eq(ret, 0);
}

static void check1_macro(uint32_t *p, uint32_t w, uint32_t h)
{
	uint32_t i, val, j;

	for (i = 0; i < 256; ++i, p += 4) {
		val = (i) | (i << 8) | (i << 16) | (i << 24);
		igt_assert_f(p[0] == val && p[1] == val && p[2] == val && p[3] == val,
			     "Retile check failed in first tile!\n"
			     "%08x %08x %08x %08x instead of %08x\n",
			     p[0], p[1], p[2], p[3], val);
	}

	val = 0x3e3e3e3e;
	for (i = 0; i < 256 * (w-1); ++i, p += 4) {
		igt_assert_f(p[0] == val && p[1] == val && p[2] == val && p[3] == val,
			     "Retile check failed in second tile!\n"
			     "%08x %08x %08x %08x instead of %08x\n",
			     p[0], p[1], p[2], p[3], val);
	}

	for (j = 1; j < h; ++j) {
		val = 0x7e7e7e7e;
		for (i = 0; i < 256; ++i, p += 4) {
			igt_assert_f(p[0] == val && p[1] == val && p[2] == val && p[3] == val,
				     "Retile check failed in third tile!\n"
				     "%08x %08x %08x %08x instead of %08x\n",
				     p[0], p[1], p[2], p[3], val);
		}

		val = 0xcececece;
		for (i = 0; i < 256 * (w-1); ++i, p += 4) {
			igt_assert_f(p[0] == val && p[1] == val && p[2] == val && p[3] == val,
				     "Retile check failed in fourth tile!\n"
				     "%08x %08x %08x %08x instead of %08x\n",
				     p[0], p[1], p[2], p[3], val);
		}
	}
}

/* test 1, see if we can copy from linear to intel Y format safely */
static void test1_macro(void)
{
	int prime_fd = -1;
	struct nouveau_bo *nvbo = NULL, *nvbi = NULL;
	rect dst, src;
	uint8_t *ptr;
	uint32_t w = 2 * 128, h = 2 * 32, x, y;

	nv_bo_alloc(&nvbi, &src, w, h, 0, -1, NOUVEAU_BO_GART);
	nv_bo_alloc(&nvbo, &dst, w, h, tile_intel_y, -1, NOUVEAU_BO_GART);

	nouveau_bo_set_prime(nvbo, &prime_fd);

	/* Set up something for our tile that should map into the first
	 * y-major tile, assuming my understanding of the documentation is
	 * correct.
	 */

	/* The first tile should be read out in groups of 16 bytes that
	 * are all set to a linearly increasing value.
	 */
	ptr = nvbi->map;
	for (x = 0; x < 128; x += 16)
		for (y = 0; y < 32; ++y)
			fill16(&ptr[y * w + x], x * 2 + y);

	/* second tile */
	for (x = 128; x < w; x += 16)
		for (y = 0; y < 32; ++y)
			fill16(&ptr[y * w + x], 0x3e);

	/* third tile */
	for (x = 0; x < 128; x += 16)
		for (y = 32; y < h; ++y)
			fill16(&ptr[y * w + x], 0x7e);

	/* last tile */
	for (x = 128; x < w; x += 16)
		for (y = 32; y < h; ++y)
			fill16(&ptr[y * w + x], 0xce);
	memset(nvbo->map, 0xfc, w * h);

	if (pcopy)
		perform_copy(nvbo, &dst, 0, 0, nvbi, &src, 0, 0, w, h);
	else
		swtile_y(nvbo->map, nvbi->map, w, h);
	check1_macro(nvbo->map, w/128, h/32);

	nouveau_bo_ref(NULL, &nvbo);
	nouveau_bo_ref(NULL, &nvbi);
	close(prime_fd);
}

static void dump_line(uint8_t *map)
{
	uint32_t dx, dy;
	igt_debug("Dumping sub-tile:\n");
	for (dy = 0; dy < 32; ++dy) {
		for (dx = 0; dx < 15; ++dx, ++map) {
			igt_debug("%02x ", *map);
		}
		igt_debug("%02x\n", *(map++));
	}
}

static void check1_micro(void *map, uint32_t pitch, uint32_t lines,
			 uint32_t dst_x, uint32_t dst_y, uint32_t w, uint32_t h)
{
	uint32_t x, y;

	/* check only the relevant subrectangle [0..w) x [0..h) */
	uint8_t *m = map;
	for (y = 0; y < h; ++y, m += pitch) {
		for (x = 0; x < w; ++x) {
			uint8_t expected = ((y & 3) << 6) | (x & 0x3f);

			if (expected != m[x])
				dump_line(m);

			igt_assert_f(expected == m[x],
				     "failed check at x=%u y=%u, expected %02x got %02x\n",
				     x, y, expected, m[x]);
		}
	}
}

/* test 1, but check the micro format; should be unaffected by bit9 swizzling */
static void test1_micro(void)
{
	struct nouveau_bo *bo_intel = NULL, *bo_nvidia = NULL, *bo_linear = NULL;
	rect intel, nvidia, linear;
	uint32_t tiling = I915_TILING_Y;

	uint32_t src_x = 0, src_y = 0;
	uint32_t dst_x = 0, dst_y = 0;
	uint32_t x, y, w = 256, h = 64;

	drm_intel_bo *test_intel_bo;
	int prime_fd;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", w * h, 4096);
	igt_assert(test_intel_bo);
	drm_intel_bo_set_tiling(test_intel_bo, &tiling, w);
	igt_assert(tiling == I915_TILING_Y);
	igt_assert(drm_intel_gem_bo_map_gtt(test_intel_bo) == 0);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);
	igt_assert_lte(0, prime_fd);
	noop_intel(test_intel_bo);

	nv_bo_alloc(&bo_intel, &intel, w, h, tile_intel_y, prime_fd, 0);
	nv_bo_alloc(&bo_nvidia, &nvidia, w, h, 0x10, -1, NOUVEAU_BO_VRAM);
	nv_bo_alloc(&bo_linear, &linear, w, h, 0, -1, NOUVEAU_BO_GART);

	for (y = 0; y < linear.h; ++y) {
		uint8_t *map = bo_linear->map;
		map += y * linear.pitch;
		for (x = 0; x < linear.pitch; ++x) {
			uint8_t pos = x & 0x3f;
			/* low 4 bits: micro tile pos */
			/* 2 bits: x pos in tile (wraps) */
			/* 2 bits: y pos in tile (wraps) */
			pos |= (y & 3) << 6;
			map[x] = pos;
		}
	}

	perform_copy(bo_nvidia, &nvidia, 0, 0, bo_linear, &linear, 0, 0, nvidia.pitch, nvidia.h);

	/* Perform the actual sub-rectangle copy */
	if (pcopy)
		perform_copy(bo_intel, &intel, dst_x, dst_y, bo_nvidia, &nvidia, src_x, src_y, w, h);
	else
		swtile_y(test_intel_bo->virtual, bo_linear->map, w, h);

	noop_intel(test_intel_bo);
	check1_micro(test_intel_bo->virtual, intel.pitch, intel.h, dst_x, dst_y, w, h);

	nouveau_bo_ref(NULL, &bo_linear);
	nouveau_bo_ref(NULL, &bo_nvidia);
	nouveau_bo_ref(NULL, &bo_intel);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test 2, see if we can copy from linear to intel X format safely.
 * It seems nvidia lacks a method to do it, so keep this test
 * as a reference for potential future tests; software tiling is
 * used for now.
 */
static void test2(void)
{
	struct nouveau_bo *nvbo = NULL, *nvbi = NULL;
	rect dst, src;
	uint8_t *ptr;
	uint32_t w = 1024, h = 16, x, y;

	nv_bo_alloc(&nvbi, &src, w, h, 0, -1, NOUVEAU_BO_GART);
	nv_bo_alloc(&nvbo, &dst, w, h, tile_intel_x, -1, NOUVEAU_BO_GART);

	/* Set up something for our tile that should map into the first
	 * x-major tile, assuming my understanding of the documentation is
	 * correct.
	 */

	/* The first tile should be read out in groups of 16 bytes that
	 * are all set to a linearly increasing value.
	 */
	ptr = nvbi->map;
	for (y = 0; y < 8; ++y)
		for (x = 0; x < 512; x += 16)
			fill16(&ptr[y * w + x], (y * 512 + x)/16);

	for (y = 0; y < 8; ++y)
		for (x = 512; x < w; x += 16)
			fill16(&ptr[y * w + x], 0x3e);

	for (y = 8; y < h; ++y)
		for (x = 0; x < 512; x += 16)
			fill16(&ptr[y * w + x], 0x7e);

	for (y = 8; y < h; ++y)
		for (x = 512; x < w; x += 16)
			fill16(&ptr[y * w + x], 0xce);
	memset(nvbo->map, 0xfc, w * h);

	/* do this in software, there is no X major tiling in PCOPY (yet?) */
	if (0 && pcopy)
		perform_copy(nvbo, &dst, 0, 0, nvbi, &src, 0, 0, w, h);
	else
		swtile_x(nvbo->map, nvbi->map, w, h);
	check1_macro(nvbo->map, w/512, h/8);

	nouveau_bo_ref(NULL, &nvbo);
	nouveau_bo_ref(NULL, &nvbi);
}

static void check3(const uint32_t *p, uint32_t pitch, uint32_t lines,
		   uint32_t sub_x, uint32_t sub_y,
		   uint32_t sub_w, uint32_t sub_h)
{
	uint32_t x, y;

	sub_w += sub_x;
	sub_h += sub_y;

	igt_assert_f(p[pitch * lines / 4 - 1] != 0x03030303,
		     "copy failed: Not all lines have been copied back!\n");

	for (y = 0; y < lines; ++y) {
		for (x = 0; x < pitch; x += 4, ++p) {
			uint32_t expected;
			if ((x < sub_x || x >= sub_w) ||
			    (y < sub_y || y >= sub_h))
				expected = 0x80808080;
			else
				expected = 0x04040404;
			igt_assert_f(*p == expected,
				     "%u,%u should be %08x, but is %08x\n",
				     x, y, expected, *p);
		}
	}
}

/* copy from the nvidia bo to the intel bo, then to a linear bo, to check
 * whether the tiling was successful */
static void test3_base(int tile_src, int tile_dst)
{
	struct nouveau_bo *bo_intel = NULL, *bo_nvidia = NULL, *bo_linear = NULL;
	rect intel, nvidia, linear;
	uint32_t cpp = 4;

	uint32_t src_x = 1 * cpp, src_y = 1;
	uint32_t dst_x = 2 * cpp, dst_y = 26;
	uint32_t w = 298 * cpp, h = 298;

	drm_intel_bo *test_intel_bo;
	int prime_fd;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", 2048 * cpp * 768, 4096);
	igt_assert(test_intel_bo);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);
	igt_assert_lte(0, prime_fd);

	nv_bo_alloc(&bo_intel, &intel, 2048 * cpp, 768, tile_dst, prime_fd, 0);
	nv_bo_alloc(&bo_nvidia, &nvidia, 300 * cpp, 300, tile_src, -1, NOUVEAU_BO_VRAM);
	nv_bo_alloc(&bo_linear, &linear, 2048 * cpp, 768, 0, -1, NOUVEAU_BO_GART);

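	/* Flow: clear the intel bo to 0x80 with a PCOPY from the linear bo,
	 * fill the nvidia bo with 0x04 the same way, blit the sub-rectangle
	 * nvidia -> intel, then copy the whole intel bo back into the linear
	 * bo and let check3() verify both patterns.
	 */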
	noop_intel(test_intel_bo);
	memset(bo_linear->map, 0x80, bo_linear->size);
	perform_copy(bo_intel, &intel, 0, 0, bo_linear, &linear, 0, 0, linear.pitch, linear.h);
	noop_intel(test_intel_bo);

	memset(bo_linear->map, 0x04, bo_linear->size);
	perform_copy(bo_nvidia, &nvidia, 0, 0, bo_linear, &linear, 0, 0, nvidia.pitch, nvidia.h);

	/* Perform the actual sub-rectangle copy */
	noop_intel(test_intel_bo);
	perform_copy(bo_intel, &intel, dst_x, dst_y, bo_nvidia, &nvidia, src_x, src_y, w, h);
	noop_intel(test_intel_bo);

	memset(bo_linear->map, 0x3, bo_linear->size);
	noop_intel(test_intel_bo);
	perform_copy(bo_linear, &linear, 0, 0, bo_intel, &intel, 0, 0, intel.pitch, intel.h);
	noop_intel(test_intel_bo);

	check3(bo_linear->map, linear.pitch, linear.h, dst_x, dst_y, w, h);

	nouveau_bo_ref(NULL, &bo_linear);
	nouveau_bo_ref(NULL, &bo_nvidia);
	nouveau_bo_ref(NULL, &bo_intel);
	drm_intel_bo_unreference(test_intel_bo);
}

static void test3_1(void)
{
	/* nvidia tiling to intel */
	test3_base(0x40, tile_intel_y);
}

static void test3_2(void)
{
	/* intel tiling to nvidia */
	test3_base(tile_intel_y, 0x40);
}

static void test3_3(void)
{
	/* intel tiling to linear */
	test3_base(tile_intel_y, 0);
}

static void test3_4(void)
{
	/* linear to intel tiling */
	test3_base(0, tile_intel_y);
}

static void test3_5(void)
{
	/* linear to linear */
	test3_base(0, 0);
}

/* Acquire when == SEQUENCE */
#define SEMA_ACQUIRE_EQUAL 1

/* Release, and write a 16-byte query structure to sema:
 * { (uint32)seq, (uint32)0, (uint64)timestamp } */
#define SEMA_WRITE_LONG 2

/* Acquire when >= SEQUENCE */
#define SEMA_ACQUIRE_GEQUAL 4

/* Test only new-style semaphores, old ones are AWFUL */
static void test_semaphore(void)
{
	drm_intel_bo *test_intel_bo = NULL;
	struct nouveau_bo *sema_bo = NULL;
	int prime_fd;
	uint32_t *sema;
	struct nouveau_pushbuf *push = npush;

	igt_skip_on(ndev->chipset < 0x84);

	/* Should probably be kept in sysmem */
	test_intel_bo = drm_intel_bo_alloc(bufmgr, "semaphore bo", 4096, 4096);
	igt_assert(test_intel_bo);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);
	igt_assert_lte(0, prime_fd);
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &sema_bo) == 0);
	close(prime_fd);

	igt_assert(drm_intel_gem_bo_map_gtt(test_intel_bo) == 0);
	sema = test_intel_bo->virtual;
	sema++; /* use the second dword; the methods below target offset 4 */
	*sema = 0;

	igt_assert(nouveau_pushbuf_space(push, 64, 0, 0) == 0);
	igt_assert(nouveau_pushbuf_refn(push, &(struct nouveau_pushbuf_refn)
					{ sema_bo, NOUVEAU_BO_GART|NOUVEAU_BO_RDWR }, 1) == 0);

	if (ndev->chipset < 0xc0) {
		struct nv04_fifo *nv04_fifo = nchannel->data;
		/* The kernel binds its own dma object here and overwrites
		 * the old one, so just rebind vram every time we submit.
		 */
		BEGIN_NV04(npush, SUBC_COPY(0x0060), 1);
		PUSH_DATA(npush, nv04_fifo->vram);
	}
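	/* Semaphore methods as used here (informal naming): 0x0010+ takes
	 * the semaphore address (hi, lo), a sequence value and a trigger;
	 * 0x0018+ reuses that address and takes a new sequence plus trigger.
	 * Each release writes its sequence to *sema; each acquire blocks the
	 * engine until the CPU stores a matching value.
	 */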
	BEGIN_NVXX(push, SUBC_COPY(0x0010), 4);
	PUSH_DATA(push, sema_bo->offset >> 32);
	PUSH_DATA(push, sema_bo->offset + 4);
	PUSH_DATA(push, 2); // SEQUENCE
	PUSH_DATA(push, SEMA_WRITE_LONG); // TRIGGER

	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 3);
	PUSH_DATA(push, SEMA_ACQUIRE_EQUAL);
	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 4);
	PUSH_DATA(push, SEMA_WRITE_LONG);

	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 5);
	PUSH_DATA(push, SEMA_ACQUIRE_GEQUAL);
	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 6);
	PUSH_DATA(push, SEMA_WRITE_LONG);

	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 7);
	PUSH_DATA(push, SEMA_ACQUIRE_GEQUAL);
	BEGIN_NVXX(push, SUBC_COPY(0x0018), 2);
	PUSH_DATA(push, 9);
	PUSH_DATA(push, SEMA_WRITE_LONG);
	nouveau_pushbuf_kick(push, push->channel);

	usleep(1000);
	igt_assert(*sema == 2);

	*sema = 3;
	usleep(1000);
	igt_assert(*sema == 4);

	*sema = 5;
	usleep(1000);
	igt_assert(*sema == 6);

	*sema = 8;
	usleep(1000);
	igt_assert(*sema == 9);

	nouveau_bo_ref(NULL, &sema_bo);
	drm_intel_bo_unreference(test_intel_bo);
}

igt_main
{
	igt_fixture {
		find_and_open_devices();

		igt_require(nouveau_fd != -1);
		igt_require(intel_fd != -1);

		/* set up intel bufmgr */
		bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
		igt_assert(bufmgr);
		/* Do not enable reuse, we share (almost) all buffers. */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		/* set up nouveau bufmgr */
		init_nouveau();

		/* set up an intel batch buffer */
		devid = intel_get_drm_devid(intel_fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);
		igt_assert(batch);
	}

#define xtest(x, args...) \
	igt_subtest( #x ) \
		(x)(args);

	xtest(test1_macro);
	xtest(test1_micro);
	//xtest(test1_swizzle);
	xtest(test2);
	xtest(test3_1);
	xtest(test3_2);
	xtest(test3_3);
	xtest(test3_4);
	xtest(test3_5);
	xtest(test_semaphore);

	igt_fixture {
		nouveau_bo_ref(NULL, &query_bo);
		nouveau_object_del(&pcopy);
		nouveau_bufctx_del(&nbufctx);
		nouveau_pushbuf_del(&npush);
		nouveau_object_del(&nchannel);

		intel_batchbuffer_free(batch);

		nouveau_client_del(&nclient);
		nouveau_device_del(&ndev);
		drm_intel_bufmgr_destroy(bufmgr);

		close(intel_fd);
		close(nouveau_fd);
	}
}