/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"
#include "igt_rand.h"

IGT_TEST_DESCRIPTION("Fill the GTT with batches.");

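/* Size of each batch buffer: 4096 << 10 == 4MiB. */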
#define BATCH_SIZE (4096<<10)

struct batch {
	uint32_t handle;
	void *ptr;
};

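/* Swap callback for igt_permute_array(): exchange two batches in place. */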
static void xchg_batch(void *array, unsigned int i, unsigned int j)
{
	struct batch *batches = array;
	struct batch tmp;

	tmp = batches[i];
	batches[i] = batches[j];
	batches[j] = tmp;
}

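/*
 * Build a MI_STORE_DWORD_IMM batch that writes its own presumed GPU
 * address into its final qword, then submit every batch in turn carrying
 * that payload.
 */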
static void submit(int fd, int gen,
		   struct drm_i915_gem_execbuffer2 *eb,
		   struct drm_i915_gem_relocation_entry *reloc,
		   struct batch *batches, unsigned int count)
{
	struct drm_i915_gem_exec_object2 obj;
	uint32_t batch[16];
	unsigned n;

	memset(&obj, 0, sizeof(obj));
	obj.relocs_ptr = to_user_pointer(reloc);
	obj.relocation_count = 2;

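	/*
	 * reloc[0] patches the store's destination address (a qword near the
	 * end of the batch itself); reloc[1] patches the value stored, so
	 * each batch ends up recording its own GPU address.
	 */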
	memset(reloc, 0, 2*sizeof(*reloc));
	reloc[0].offset = eb->batch_start_offset;
	reloc[0].offset += sizeof(uint32_t);
	reloc[0].delta = BATCH_SIZE - eb->batch_start_offset - 8;
	reloc[0].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc[1].offset = eb->batch_start_offset;
	reloc[1].offset += 3*sizeof(uint32_t);
	reloc[1].read_domains = I915_GEM_DOMAIN_INSTRUCTION;

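	/*
	 * Assemble the store; the command length and address layout vary by
	 * generation: a 64-bit address on gen8+, an extra dword before a
	 * 32-bit address on gen4-7, and a shorter encoding on gen2/3, so
	 * the relocation offsets shift accordingly.
	 */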
	n = 0;
	batch[n] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[n] |= 1 << 21;
		batch[n]++;
		batch[++n] = reloc[0].delta; /* lower_32_bits(address) */
		batch[++n] = 0; /* upper_32_bits(address) */
	} else if (gen >= 4) {
		batch[++n] = 0;
		batch[++n] = reloc[0].delta; /* lower_32_bits(address) */
		reloc[0].offset += sizeof(uint32_t);
	} else {
		batch[n]--;
		batch[++n] = reloc[0].delta; /* lower_32_bits(address) */
		reloc[1].offset -= sizeof(uint32_t);
	}
	batch[++n] = 0; /* lower_32_bits(value) */
	batch[++n] = 0; /* upper_32_bits(value) / nop */
	batch[++n] = MI_BATCH_BUFFER_END;

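	/*
	 * Reuse a single exec object, rewriting its handle and the batch
	 * contents each time; with more batches than fit in the GTT, the
	 * kernel must evict earlier batches to make room for later ones.
	 */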
	eb->buffers_ptr = to_user_pointer(&obj);
	for (unsigned i = 0; i < count; i++) {
		obj.handle = batches[i].handle;
		reloc[0].target_handle = obj.handle;
		reloc[1].target_handle = obj.handle;

		obj.offset = 0;
		reloc[0].presumed_offset = obj.offset;
		reloc[1].presumed_offset = obj.offset;

		memcpy(batches[i].ptr + eb->batch_start_offset,
		       batch, sizeof(batch));

		gem_execbuf(fd, eb);
	}
	/* As we have been lying about the write_domain, we need to do a sync */
	gem_sync(fd, obj.handle);
}

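/*
 * Allocate more batches than fit in the (4GiB-limited) aperture and submit
 * them all in a loop on each requested engine, forcing the kernel to evict
 * and rebind; then verify that every batch wrote its own GPU address.
 */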
static void fillgtt(int fd, unsigned ring, int timeout)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_relocation_entry reloc[2];
	volatile uint64_t *shared;
	struct batch *batches;
	unsigned engines[16];
	unsigned nengine;
	unsigned engine;
	uint64_t size;
	unsigned count;

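	/* An anonymous shared page for reporting per-child cycle counts. */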
	shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	igt_assert(shared != MAP_FAILED);

	nengine = 0;
	if (ring == 0) {
		for_each_physical_engine(fd, engine) {
			if (!gem_can_store_dword(fd, engine))
				continue;

			engines[nengine++] = engine;
		}
	} else {
		gem_require_ring(fd, ring);
		igt_require(gem_can_store_dword(fd, ring));

		engines[nengine++] = ring;
	}
	igt_require(nengine);

	size = gem_aperture_size(fd);
	if (size > 1ull<<32) /* Limit to 4GiB as we do not use allow-48b */
		size = 1ull << 32;
	igt_require(size < (1ull<<32) * BATCH_SIZE);

	count = size / BATCH_SIZE + 1;
	igt_debug("Using %'d batches to fill %'llu aperture on %d engines\n",
		  count, (long long)size, nengine);
	intel_require_memory(count, BATCH_SIZE, CHECK_RAM);
	intel_detect_and_clear_missed_interrupts(fd);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffer_count = 1;
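	/* Pre-gen6 MI_STORE_DWORD_IMM requires a privileged (secure) batch. */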
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	batches = calloc(count, sizeof(*batches));
	igt_assert(batches);
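	/* Create the batches, preferring WC mmaps with a GTT mmap fallback. */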
	for (unsigned i = 0; i < count; i++) {
		batches[i].handle = gem_create(fd, BATCH_SIZE);
		batches[i].ptr =
			__gem_mmap__wc(fd, batches[i].handle,
				       0, BATCH_SIZE, PROT_WRITE);
		if (!batches[i].ptr) {
			batches[i].ptr =
				__gem_mmap__gtt(fd, batches[i].handle,
						BATCH_SIZE, PROT_WRITE);
		}
		igt_require(batches[i].ptr);
	}

	/* Flush all memory before we start the timer */
	submit(fd, gen, &execbuf, reloc, batches, count);

	igt_fork(child, nengine) {
		uint64_t cycles = 0;
		hars_petruska_f54_1_random_perturb(child);
		igt_permute_array(batches, count, xchg_batch);
		execbuf.batch_start_offset = child*64;
		execbuf.flags |= engines[child];
		igt_until_timeout(timeout) {
			submit(fd, gen, &execbuf, reloc, batches, count);
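			/*
			 * Check that the qword the GPU wrote at each batch's
			 * tail matches the address the kernel relocated into
			 * the store instruction.
			 */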
			for (unsigned i = 0; i < count; i++) {
				uint64_t offset, delta;

				offset = *(uint64_t *)(batches[i].ptr + reloc[1].offset);
				delta = *(uint64_t *)(batches[i].ptr + reloc[0].delta);
				igt_assert_eq_u64(offset, delta);
			}
			cycles++;
		}
		shared[child] = cycles;
		igt_info("engine[%d]: %llu cycles\n", child, (long long)cycles);
	}
	igt_waitchildren();

	for (unsigned i = 0; i < count; i++) {
		munmap(batches[i].ptr, BATCH_SIZE);
		gem_close(fd, batches[i].handle);
	}

	shared[nengine] = 0;
	for (unsigned i = 0; i < nengine; i++)
		shared[nengine] += shared[i];
	igt_info("Total: %llu cycles\n", (long long)shared[nengine]);

	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

igt_main
{
	const struct intel_execution_engine *e;
	int device = -1;

	igt_skip_on_simulation();

	igt_fixture {
		device = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(device);
		igt_require(gem_can_store_dword(device, 0));
		igt_fork_hang_detector(device);
	}

	igt_subtest("basic")
		fillgtt(device, 0, 1); /* just enough to run a single pass */

	for (e = intel_execution_engines; e->name; e++)
		igt_subtest_f("%s", e->name)
			fillgtt(device, e->exec_id | e->flags, 20);

	igt_subtest("all")
		fillgtt(device, 0, 150);

	igt_fixture {
		igt_stop_hang_detector();
		close(device);
	}
}