/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Arun Siluvery <[email protected]>
 *
 */
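
/*
 * Check that the workaround registers the kernel reports in debugfs
 * (i915_wa_registers) hold their expected values, both on a plain read and
 * after a GPU reset, suspend-to-RAM or hibernation.
 */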

#include "igt.h"

#include <fcntl.h>

#define PAGE_SIZE 4096
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

static int gen;

enum operation {
	GPU_RESET,
	SUSPEND_RESUME,
	HIBERNATE_RESUME,
	SIMPLE_READ,
};

struct intel_wa_reg {
	uint32_t addr;
	uint32_t value;
	uint32_t mask;
};

static struct write_only_list {
	unsigned int gen;
	uint32_t addr;
} wo_list[] = {
	{ 10, 0xE5F0 } /* WaForceContextSaveRestoreNonCoherent:cnl */

	/*
	 * FIXME: If you are contemplating adding entries here, treat this
	 * list as a temporary solution: you need to check manually, from
	 * the context image, that your workaround is taking effect. A
	 * better long-term solution would be a context-image validator.
	 */
};

static struct intel_wa_reg *wa_regs;
static int num_wa_regs;

static bool write_only(const uint32_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wo_list); i++) {
		if (gen == wo_list[i].gen &&
		    addr == wo_list[i].addr) {
			igt_info("Skipping check for write-only register 0x%x\n", addr);
			return true;
		}
	}

	return false;
}

#define MI_STORE_REGISTER_MEM (0x24 << 23)

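/*
 * Read every workaround register back with MI_STORE_REGISTER_MEM from the
 * given context and count how many do not match their expected value under
 * the mask.
 */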
static int workaround_fail_count(int i915, uint32_t ctx)
{
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t result_sz, batch_sz;
	uint32_t *base, *out;
	igt_spin_t *spin;
	int fw, fail = 0;

	reloc = calloc(num_wa_regs, sizeof(*reloc));
	igt_assert(reloc);

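	/*
	 * One dword of results per register; each SRM takes at most four
	 * dwords (16 bytes), plus one dword for MI_BATCH_BUFFER_END.
	 */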
	result_sz = 4 * num_wa_regs;
	result_sz = PAGE_ALIGN(result_sz);

	batch_sz = 16 * num_wa_regs + 4;
	batch_sz = PAGE_ALIGN(batch_sz);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(i915, result_sz);
	gem_set_caching(i915, obj[0].handle, I915_CACHING_CACHED);
	obj[1].handle = gem_create(i915, batch_sz);
	obj[1].relocs_ptr = to_user_pointer(reloc);
	obj[1].relocation_count = num_wa_regs;

	out = base =
		gem_mmap__cpu(i915, obj[1].handle, 0, batch_sz, PROT_WRITE);
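	/*
	 * Emit one MI_STORE_REGISTER_MEM per workaround register, copying
	 * its value into consecutive dwords of the result buffer; gen8+
	 * uses a 64-bit destination address and so needs an extra dword.
	 */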
	for (int i = 0; i < num_wa_regs; i++) {
		*out++ = MI_STORE_REGISTER_MEM | ((gen >= 8 ? 4 : 2) - 2);
		*out++ = wa_regs[i].addr;
		reloc[i].target_handle = obj[0].handle;
		reloc[i].offset = (out - base) * sizeof(*out);
		reloc[i].delta = i * sizeof(uint32_t);
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		*out++ = reloc[i].delta;
		if (gen >= 8)
			*out++ = 0;
	}
	*out++ = MI_BATCH_BUFFER_END;
	munmap(base, batch_sz);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.rsvd1 = ctx;
	gem_execbuf(i915, &execbuf);

	gem_set_domain(i915, obj[0].handle, I915_GEM_DOMAIN_CPU, 0);

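	/*
	 * Keep the context active on the GPU and hold forcewake while the
	 * results are inspected, so that the mmio fallback below samples
	 * live register values rather than a powered-down device.
	 */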
	spin = igt_spin_new(i915, .ctx = ctx, .flags = IGT_SPIN_POLL_RUN);
	igt_spin_busywait_until_started(spin);

	fw = igt_open_forcewake_handle(i915);
	if (fw < 0)
		igt_debug("Unable to obtain i915_user_forcewake!\n");

	igt_debug("Address\tval\t\tmask\t\tread\t\tresult\n");

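	/* Compare each value read back against its expected value under the mask */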
	out = gem_mmap__cpu(i915, obj[0].handle, 0, result_sz, PROT_READ);
	for (int i = 0; i < num_wa_regs; i++) {
		char buf[80];

		snprintf(buf, sizeof(buf),
			 "0x%05X\t0x%08X\t0x%08X\t0x%08X",
			 wa_regs[i].addr, wa_regs[i].value, wa_regs[i].mask,
			 out[i]);

		/* If the SRM failed, fill in the result using mmio */
		if (out[i] == 0)
			out[i] = *(volatile uint32_t *)(igt_global_mmio + wa_regs[i].addr);

		if ((wa_regs[i].value & wa_regs[i].mask) ==
		    (out[i] & wa_regs[i].mask)) {
			igt_debug("%s\tOK\n", buf);
		} else if (write_only(wa_regs[i].addr)) {
			igt_debug("%s\tIGNORED (w/o)\n", buf);
		} else {
			igt_warn("%s\tFAIL\n", buf);
			fail++;
		}
	}
	munmap(out, result_sz);

	close(fw);
	igt_spin_free(i915, spin);

	gem_close(i915, obj[1].handle);
	gem_close(i915, obj[0].handle);
	free(reloc);

	return fail;
}

#define CONTEXT 0x1
#define FD 0x2
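
/*
 * Verify the workarounds, perform the requested operation (reset, suspend,
 * hibernate, or nothing at all), then verify that they survived it;
 * optionally on a fresh context and/or a reopened fd.
 */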
static void check_workarounds(int fd, enum operation op, unsigned int flags)
{
	uint32_t ctx = 0;

	if (flags & FD)
		fd = gem_reopen_driver(fd);

	if (flags & CONTEXT) {
		gem_require_contexts(fd);
		ctx = gem_context_create(fd);
	}

	igt_assert_eq(workaround_fail_count(fd, ctx), 0);

	switch (op) {
	case GPU_RESET:
		igt_force_gpu_reset(fd);
		break;

	case SUSPEND_RESUME:
		igt_system_suspend_autoresume(SUSPEND_STATE_MEM,
					      SUSPEND_TEST_NONE);
		break;

	case HIBERNATE_RESUME:
		igt_system_suspend_autoresume(SUSPEND_STATE_DISK,
					      SUSPEND_TEST_NONE);
		break;

	case SIMPLE_READ:
		break;

	default:
		igt_assert(0);
	}

	igt_assert_eq(workaround_fail_count(fd, ctx), 0);

	if (flags & CONTEXT)
		gem_context_destroy(fd, ctx);
	if (flags & FD)
		close(fd);
}

igt_main
{
	int device = -1;
	const struct {
		const char *name;
		enum operation op;
	} ops[] = {
		{ "basic-read", SIMPLE_READ },
		{ "reset", GPU_RESET },
		{ "suspend-resume", SUSPEND_RESUME },
		{ "hibernate-resume", HIBERNATE_RESUME },
		{ }
	}, *op;
	const struct {
		const char *name;
		unsigned int flags;
	} modes[] = {
		{ "", 0 },
		{ "-context", CONTEXT },
		{ "-fd", FD },
		{ }
	}, *m;

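	/*
	 * Open the device and parse the expected register addresses, values
	 * and masks from the first engine's list in debugfs
	 * i915_wa_registers.
	 */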
	igt_fixture {
		FILE *file;
		char *line = NULL;
		char *str;
		size_t line_size;
		int i, fd;

		device = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(device);

		intel_mmio_use_pci_bar(intel_get_pci_device());

		gen = intel_gen(intel_get_drm_devid(device));

		fd = igt_debugfs_open(device, "i915_wa_registers", O_RDONLY);
		file = fdopen(fd, "r");
		igt_require(getline(&line, &line_size, file) > 0);
		igt_debug("i915_wa_registers: %s", line);

		/* We assume that the first block of registers listed is for rcs */
		str = strstr(line, "Workarounds applied:");
		igt_assert(str);
		sscanf(str, "Workarounds applied: %d", &num_wa_regs);
		igt_require(num_wa_regs > 0);

		wa_regs = malloc(num_wa_regs * sizeof(*wa_regs));
		igt_assert(wa_regs);

		i = 0;
		while (getline(&line, &line_size, file) > 0) {
			if (strstr(line, "Workarounds applied:"))
				break;

			igt_debug("%s", line);
			if (sscanf(line, "0x%X: 0x%08X, mask: 0x%08X",
				   &wa_regs[i].addr,
				   &wa_regs[i].value,
				   &wa_regs[i].mask) == 3)
				i++;
		}

		igt_assert_lte(i, num_wa_regs);

		free(line);
		fclose(file);
		close(fd);
	}

	for (op = ops; op->name; op++) {
		igt_subtest_group {
			igt_hang_t hang = {};

			igt_fixture {
				switch (op->op) {
				case GPU_RESET:
					hang = igt_allow_hang(device, 0, 0);
					break;
				default:
					break;
				}
			}

			for (m = modes; m->name; m++)
				igt_subtest_f("%s%s", op->name, m->name)
					check_workarounds(device, op->op, m->flags);

			igt_fixture {
				switch (op->op) {
				case GPU_RESET:
					igt_disallow_hang(device, hang);
					break;
				default:
					break;
				}
			}
		}
	}
}