1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "igt.h"
25 #include <stdbool.h>
26 #include <unistd.h>
27 #include <stdlib.h>
28 #include <sys/ioctl.h>
29 #include <stdio.h>
30 #include <string.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <errno.h>
34 #include <sys/stat.h>
35 #include <sys/time.h>
36
37 #include <drm.h>
38
39 #include "intel_bufmgr.h"
40 #include "igt_debugfs.h"
41
/* 512x512 32bpp linear scratch surface: 1 MiB per buffer object. */
#define WIDTH 512
#define STRIDE (WIDTH*4)
#define HEIGHT 512
#define SIZE (HEIGHT*STRIDE)
46
create_bo(drm_intel_bufmgr * bufmgr,uint32_t pixel)47 static drm_intel_bo *create_bo(drm_intel_bufmgr *bufmgr,
48 uint32_t pixel)
49 {
50 uint64_t value = (uint64_t)pixel << 32 | pixel, *v;
51 drm_intel_bo *bo;
52
53 bo = drm_intel_bo_alloc(bufmgr, "surface", SIZE, 4096);
54 igt_assert(bo);
55
56 do_or_die(drm_intel_bo_map(bo, 1));
57 v = bo->virtual;
58 for (int i = 0; i < SIZE / sizeof(value); i += 8) {
59 v[i + 0] = value; v[i + 1] = value;
60 v[i + 2] = value; v[i + 3] = value;
61 v[i + 4] = value; v[i + 5] = value;
62 v[i + 6] = value; v[i + 7] = value;
63 }
64 drm_intel_bo_unmap(bo);
65
66 return bo;
67 }
68
/*
 * Populate @buf as a WIDTHxHEIGHT 32bpp linear render surface backed by
 * a freshly allocated bo filled with @pixel.
 */
static void scratch_buf_init(struct igt_buf *buf,
			     drm_intel_bufmgr *bufmgr,
			     uint32_t pixel)
{
	drm_intel_bo *bo = create_bo(bufmgr, pixel);

	memset(buf, 0, sizeof(*buf));
	buf->bo = bo;
	buf->bpp = 32;
	buf->size = SIZE;
	buf->stride = STRIDE;
	buf->tiling = I915_TILING_NONE;
}
81
scratch_buf_fini(struct igt_buf * buf)82 static void scratch_buf_fini(struct igt_buf *buf)
83 {
84 drm_intel_bo_unreference(buf->bo);
85 memset(buf, 0, sizeof(*buf));
86 }
87
/*
 * Fork @count children which each hammer the render engine for @timeout
 * seconds: every iteration render-copies a freshly initialised source
 * surface into dst[child], and the final iteration copies a surface
 * filled with (final | child << 16) so surfaces_check() can verify the
 * end state.  With CREATE_CONTEXT each child runs on its own GEM
 * context.  Parent-side setup (fd, bufmgr, dst bo) happens before the
 * fork so the children inherit them.
 */
static void fork_rcs_copy(int timeout, uint32_t final,
			  drm_intel_bo **dst, int count,
			  unsigned flags)
#define CREATE_CONTEXT 0x1
{
	igt_render_copyfunc_t render_copy;
	uint64_t mem_per_child;
	int devid;

	mem_per_child = SIZE;
	if (flags & CREATE_CONTEXT)
		mem_per_child += 2 * 128 * 1024; /* rough context sizes */
	intel_require_memory(count, mem_per_child, CHECK_RAM);

	for (int child = 0; child < count; child++) {
		int fd = drm_open_driver(DRIVER_INTEL);
		drm_intel_bufmgr *bufmgr;

		/* devid is overwritten each iteration; all fds open the
		 * same device, so the last value is valid for every child.
		 */
		devid = intel_get_drm_devid(fd);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		igt_assert(bufmgr);

		dst[child] = create_bo(bufmgr, ~0);

		if (flags & CREATE_CONTEXT) {
			drm_intel_context *ctx;

			/* Probe for context support before forking; the
			 * probe context itself is not used further.
			 */
			ctx = drm_intel_gem_context_create(dst[child]->bufmgr);
			igt_require(ctx);
		}

		render_copy = igt_get_render_copyfunc(devid);
		igt_require_f(render_copy,
			      "no render-copy function\n");
	}

	igt_fork(child, count) {
		struct intel_batchbuffer *batch;
		struct igt_buf buf = {};
		struct igt_buf src;
		unsigned long i;

		batch = intel_batchbuffer_alloc(dst[child]->bufmgr,
						devid);
		igt_assert(batch);

		if (flags & CREATE_CONTEXT) {
			drm_intel_context *ctx;

			/* Per-child private context for all render copies */
			ctx = drm_intel_gem_context_create(dst[child]->bufmgr);
			intel_batchbuffer_set_context(batch, ctx);
		}

		/* Wrap the shared destination bo in an igt_buf */
		buf.bo = dst[child];
		buf.stride = STRIDE;
		buf.tiling = I915_TILING_NONE;
		buf.size = SIZE;
		buf.bpp = 32;

		i = 0;
		igt_until_timeout(timeout) {
			/* Tag each source with the iteration and child id */
			scratch_buf_init(&src, dst[child]->bufmgr,
					 i++ | child << 16);
			render_copy(batch, NULL,
				    &src, 0, 0,
				    WIDTH, HEIGHT,
				    &buf, 0, 0);
			scratch_buf_fini(&src);
		}

		/* Last copy writes the value surfaces_check() expects */
		scratch_buf_init(&src, dst[child]->bufmgr,
				 final | child << 16);
		render_copy(batch, NULL,
			    &src, 0, 0,
			    WIDTH, HEIGHT,
			    &buf, 0, 0);
		scratch_buf_fini(&src);
	}
}
168
/*
 * Fork @count children which each hammer the blitter for @timeout
 * seconds: every iteration blits a tagged source bo through an
 * intermediate bo into dst[child], ending with a blit of
 * (final | child << 16) so surfaces_check() can verify the end state.
 * Parent-side setup (fd, bufmgr, dst bo) happens before the fork.
 */
static void fork_bcs_copy(int timeout, uint32_t final,
			  drm_intel_bo **dst, int count)
{
	int devid;

	for (int child = 0; child < count; child++) {
		drm_intel_bufmgr *bufmgr;
		int fd = drm_open_driver(DRIVER_INTEL);

		/* devid is overwritten each iteration; all fds open the
		 * same device, so the last value is valid for every child.
		 */
		devid = intel_get_drm_devid(fd);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		igt_assert(bufmgr);

		dst[child] = create_bo(bufmgr, ~0);
	}

	igt_fork(child, count) {
		struct intel_batchbuffer *batch;
		drm_intel_bo *src[2];
		unsigned long i;


		batch = intel_batchbuffer_alloc(dst[child]->bufmgr,
						devid);
		igt_assert(batch);

		i = 0;
		igt_until_timeout(timeout) {
			/* src[0] is a scratch intermediate, src[1] carries
			 * the per-iteration tag; chain two blits so the
			 * tag lands in dst via src[0].
			 */
			src[0] = create_bo(dst[child]->bufmgr,
					   ~0);
			src[1] = create_bo(dst[child]->bufmgr,
					   i++ | child << 16);

			intel_copy_bo(batch, src[0], src[1], SIZE);
			intel_copy_bo(batch, dst[child], src[0], SIZE);

			drm_intel_bo_unreference(src[1]);
			drm_intel_bo_unreference(src[0]);
		}

		/* Final pair writes the value surfaces_check() expects */
		src[0] = create_bo(dst[child]->bufmgr,
				   ~0);
		src[1] = create_bo(dst[child]->bufmgr,
				   final | child << 16);

		intel_copy_bo(batch, src[0], src[1], SIZE);
		intel_copy_bo(batch, dst[child], src[0], SIZE);

		drm_intel_bo_unreference(src[1]);
		drm_intel_bo_unreference(src[0]);
	}
}
222
/*
 * Verify that every 32-bit pixel of each child's surface holds the value
 * the final copy was expected to leave behind: expected | child << 16.
 */
static void surfaces_check(drm_intel_bo **bo, int count, uint32_t expected)
{
	for (int child = 0; child < count; child++) {
		const uint32_t result = expected | child << 16;
		uint32_t *px;

		do_or_die(drm_intel_bo_map(bo[child], 0));
		px = bo[child]->virtual;
		for (int i = 0; i < SIZE/4; i++)
			igt_assert_eq(px[i], result);
		drm_intel_bo_unmap(bo[child]);
	}
}
235
exec_and_get_offset(int fd,uint32_t batch)236 static uint64_t exec_and_get_offset(int fd, uint32_t batch)
237 {
238 struct drm_i915_gem_execbuffer2 execbuf;
239 struct drm_i915_gem_exec_object2 exec[1];
240 uint32_t batch_data[2] = { MI_BATCH_BUFFER_END };
241
242 gem_write(fd, batch, 0, batch_data, sizeof(batch_data));
243
244 memset(exec, 0, sizeof(exec));
245 exec[0].handle = batch;
246
247 memset(&execbuf, 0, sizeof(execbuf));
248 execbuf.buffers_ptr = to_user_pointer(exec);
249 execbuf.buffer_count = 1;
250
251 gem_execbuf(fd, &execbuf);
252 igt_assert_neq(exec[0].offset, -1);
253
254 return exec[0].offset;
255 }
256
flink_and_close(void)257 static void flink_and_close(void)
258 {
259 uint32_t fd, fd2;
260 uint32_t bo, flinked_bo, new_bo, name;
261 uint64_t offset, offset_new;
262
263 fd = drm_open_driver(DRIVER_INTEL);
264 igt_require(gem_uses_full_ppgtt(fd));
265
266 bo = gem_create(fd, 4096);
267 name = gem_flink(fd, bo);
268
269 fd2 = drm_open_driver(DRIVER_INTEL);
270
271 flinked_bo = gem_open(fd2, name);
272 offset = exec_and_get_offset(fd2, flinked_bo);
273 gem_sync(fd2, flinked_bo);
274 gem_close(fd2, flinked_bo);
275
276 igt_drop_caches_set(fd, DROP_RETIRE | DROP_IDLE);
277
278 /* the flinked bo VMA should have been cleared now, so a new bo of the
279 * same size should get the same offset
280 */
281 new_bo = gem_create(fd2, 4096);
282 offset_new = exec_and_get_offset(fd2, new_bo);
283 gem_close(fd2, new_bo);
284
285 igt_assert_eq(offset, offset_new);
286
287 gem_close(fd, bo);
288 close(fd);
289 close(fd2);
290 }
291
#define N_CHILD 8
igt_main
{
	igt_fixture {
		/* Bail out early on machines without a usable i915 GEM */
		int fd = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(fd);
		close(fd);
	}

	/* One blitter child racing N render children on the default ctx */
	igt_subtest("blt-vs-render-ctx0") {
		drm_intel_bo *bcs[1], *rcs[N_CHILD];

		fork_bcs_copy(30, 0x4000, bcs, 1);
		fork_rcs_copy(30, 0x8000 / N_CHILD, rcs, N_CHILD, 0);

		igt_waitchildren();

		surfaces_check(bcs, 1, 0x4000);
		surfaces_check(rcs, N_CHILD, 0x8000 / N_CHILD);
	}

	/* Same race, but each render child on its own GEM context */
	igt_subtest("blt-vs-render-ctxN") {
		drm_intel_bo *bcs[1], *rcs[N_CHILD];

		fork_rcs_copy(30, 0x8000 / N_CHILD, rcs, N_CHILD, CREATE_CONTEXT);
		fork_bcs_copy(30, 0x4000, bcs, 1);

		igt_waitchildren();

		surfaces_check(bcs, 1, 0x4000);
		surfaces_check(rcs, N_CHILD, 0x8000 / N_CHILD);
	}

	igt_subtest("flink-and-close-vma-leak")
		flink_and_close();
}
328