/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_pack_color.h"
#include "util/u_upload_mgr.h"

#include "v3d_context.h"
#include "compiler/v3d_compiler.h"

/* We don't expect the packets we use in this file to change across HW
 * versions, so we just include the v42 header directly.
 */
#include "broadcom/cle/v3d_packet_v42_pack.h"

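/* Returns the float bits of 1/width (or 1/height) for a RECT texture, so the
 * shader can scale unnormalized RECT texel coordinates into the [0, 1] range.
 */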
static uint32_t
get_texrect_scale(struct v3d_texture_stateobj *texstate,
                  enum quniform_contents contents,
                  uint32_t data)
{
        struct pipe_sampler_view *texture = texstate->textures[data];
        uint32_t dim;

        if (contents == QUNIFORM_TEXRECT_SCALE_X)
                dim = texture->texture->width0;
        else
                dim = texture->texture->height0;

        return fui(1.0f / dim);
}

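/* Returns the texture dimension (or level/layer count) requested by the
 * compiler for a textureSize()/textureQueryLevels()-style query, minified to
 * the view's first level where that applies.
 */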
static uint32_t
get_texture_size(struct v3d_texture_stateobj *texstate,
                 enum quniform_contents contents,
                 uint32_t data)
{
        struct pipe_sampler_view *texture = texstate->textures[data];
        switch (contents) {
        case QUNIFORM_TEXTURE_WIDTH:
                if (texture->target == PIPE_BUFFER) {
                        return texture->u.buf.size /
                                util_format_get_blocksize(texture->format);
                } else {
                        return u_minify(texture->texture->width0,
                                        texture->u.tex.first_level);
                }
        case QUNIFORM_TEXTURE_HEIGHT:
                return u_minify(texture->texture->height0,
                                texture->u.tex.first_level);
        case QUNIFORM_TEXTURE_DEPTH:
                assert(texture->target != PIPE_BUFFER);
                return u_minify(texture->texture->depth0,
                                texture->u.tex.first_level);
        case QUNIFORM_TEXTURE_ARRAY_SIZE:
                assert(texture->target != PIPE_BUFFER);
                if (texture->target != PIPE_TEXTURE_CUBE_ARRAY) {
                        return texture->texture->array_size;
                } else {
                        assert(texture->texture->array_size % 6 == 0);
                        return texture->texture->array_size / 6;
                }
        case QUNIFORM_TEXTURE_LEVELS:
                assert(texture->target != PIPE_BUFFER);
                return (texture->u.tex.last_level -
                        texture->u.tex.first_level) + 1;
        default:
                unreachable("Bad texture size field");
        }
}

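/* Same as get_texture_size(), but for shader image views (imageSize()). */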
static uint32_t
get_image_size(struct v3d_shaderimg_stateobj *shaderimg,
               enum quniform_contents contents,
               uint32_t data)
{
        struct v3d_image_view *image = &shaderimg->si[data];

        switch (contents) {
        case QUNIFORM_IMAGE_WIDTH:
                if (image->base.resource->target == PIPE_BUFFER) {
                        return image->base.u.buf.size /
                                util_format_get_blocksize(image->base.format);
                } else {
                        return u_minify(image->base.resource->width0,
                                        image->base.u.tex.level);
                }
        case QUNIFORM_IMAGE_HEIGHT:
                assert(image->base.resource->target != PIPE_BUFFER);
                return u_minify(image->base.resource->height0,
                                image->base.u.tex.level);
        case QUNIFORM_IMAGE_DEPTH:
                assert(image->base.resource->target != PIPE_BUFFER);
                return u_minify(image->base.resource->depth0,
                                image->base.u.tex.level);
        case QUNIFORM_IMAGE_ARRAY_SIZE:
                assert(image->base.resource->target != PIPE_BUFFER);
                if (image->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY) {
                        return image->base.resource->array_size;
                } else {
                        assert(image->base.resource->array_size % 6 == 0);
                        return image->base.resource->array_size / 6;
                }
        default:
                unreachable("Bad texture size field");
        }
}

/** Writes the V3D 4.x TMU configuration parameter 0. */
static void
write_tmu_p0(struct v3d_job *job,
             struct v3d_cl_out **uniforms,
             struct v3d_texture_stateobj *texstate,
             uint32_t data)
{
        int unit = v3d_unit_data_get_unit(data);
        struct pipe_sampler_view *psview = texstate->textures[unit];
        struct v3d_sampler_view *sview = v3d_sampler_view(psview);
        /* GL_OES_texture_buffer spec:
         *     "If no buffer object is bound to the buffer texture, the
         *      results of the texel access are undefined."
         *
         * This can be interpreted as allowing any result to come back, as
         * long as the program doesn't terminate (and some tests rely on that
         * interpretation).
         *
         * FIXME: just returning is not a fully valid solution, as the shader
         * state address we skip writing here could still end up being wrong.
         * We may need to set up a BO with a "default texture state" instead.
         */
        if (sview == NULL)
                return;

        struct v3d_resource *rsc = v3d_resource(sview->texture);

        cl_aligned_reloc(&job->indirect, uniforms, sview->bo,
                         v3d_unit_data_get_offset(data));
        v3d_job_add_bo(job, rsc->bo);
}

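/** Writes the V3D 4.x TMU configuration parameter 0 for a shader image. */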
static void
write_image_tmu_p0(struct v3d_job *job,
                   struct v3d_cl_out **uniforms,
                   struct v3d_shaderimg_stateobj *img,
                   uint32_t data)
{
        /* Extract the image unit from the top bits, and the compiler's
         * packed p0 from the bottom.
         */
        uint32_t unit = data >> 24;
        uint32_t p0 = data & 0x00ffffff;

        struct v3d_image_view *iview = &img->si[unit];
        struct v3d_resource *rsc = v3d_resource(iview->base.resource);

        cl_aligned_reloc(&job->indirect, uniforms,
                         v3d_resource(iview->tex_state)->bo,
                         iview->tex_state_offset | p0);
        v3d_job_add_bo(job, rsc->bo);
}


/** Writes the V3D 4.x TMU configuration parameter 1. */
static void
write_tmu_p1(struct v3d_job *job,
             struct v3d_cl_out **uniforms,
             struct v3d_texture_stateobj *texstate,
             uint32_t data)
{
        uint32_t unit = v3d_unit_data_get_unit(data);
        struct pipe_sampler_state *psampler = texstate->samplers[unit];
        struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
        struct pipe_sampler_view *psview = texstate->textures[unit];
        struct v3d_sampler_view *sview = v3d_sampler_view(psview);
        int variant = 0;

        /* If the compiler is asking us to write parameter 1, it needs a
         * sampler, so at this point we should have both a sampler and a
         * psampler. As an additional check, assert that this is not a texel
         * buffer access, since texel buffers don't have samplers.
         */
        assert(psview->target != PIPE_BUFFER);
        assert(sampler);
        assert(psampler);

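        /* Samplers with border colors can be stored as multiple hardware
         * variants (the border color encoding depends on the texture
         * format), so pick the variant recorded for this sampler view.
         */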
        if (sampler->border_color_variants)
                variant = sview->sampler_variant;

        uint32_t p1_packed = v3d_unit_data_get_offset(data);
        v3d_pack_unnormalized_coordinates(&job->v3d->screen->devinfo, &p1_packed,
                                          sampler->base.unnormalized_coords);

        cl_aligned_reloc(&job->indirect, uniforms,
                         v3d_resource(sampler->sampler_state)->bo,
                         sampler->sampler_state_offset[variant] | p1_packed);
}

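/**
 * Writes the uniform stream for a compiled shader into the job's indirect
 * CL, resolving each quniform entry against the current context state, and
 * returns a reloc pointing at the start of the stream.
 */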
struct v3d_cl_reloc
v3d_write_uniforms(struct v3d_context *v3d, struct v3d_job *job,
                   struct v3d_compiled_shader *shader,
                   enum pipe_shader_type stage)
{
        struct v3d_device_info *devinfo = &v3d->screen->devinfo;
        struct v3d_constbuf_stateobj *cb = &v3d->constbuf[stage];
        struct v3d_texture_stateobj *texstate = &v3d->tex[stage];
        struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;

        /* The hardware always pre-fetches the next uniform (even when there
         * are none left to read), so we always allocate space for an extra
         * slot. This fixes MMU exceptions reported since Linux kernel 5.4
         * when the uniforms fill up the tail bytes of a page in the indirect
         * BO: in that scenario, the pre-fetch after reading the last uniform
         * reads beyond the end of the page and triggers the MMU exception.
         */
        v3d_cl_ensure_space(&job->indirect, (uinfo->count + 1) * 4, 4);

        struct v3d_cl_reloc uniform_stream = cl_get_address(&job->indirect);
        v3d_bo_reference(uniform_stream.bo);

        struct v3d_cl_out *uniforms =
                cl_start(&job->indirect);

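        /* Emit one 32-bit slot per uniform, in the order the compiler listed
         * them, patching in the current context state where needed.
         */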
        for (int i = 0; i < uinfo->count; i++) {
                uint32_t data = uinfo->data[i];

                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_aligned_u32(&uniforms, data);
                        break;
                case QUNIFORM_UNIFORM:
                        cl_aligned_u32(&uniforms, gallium_uniforms[data]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE: {
                        cl_aligned_f(&uniforms, v3d->viewport.scale[0] * devinfo->clipper_xy_granularity);
                        break;
                }
                case QUNIFORM_VIEWPORT_Y_SCALE: {
                        cl_aligned_f(&uniforms, v3d->viewport.scale[1] * devinfo->clipper_xy_granularity);
                        break;
                }
                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_aligned_f(&uniforms, v3d->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_aligned_f(&uniforms, v3d->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        cl_aligned_f(&uniforms,
                                     v3d->clip.ucp[data / 4][data % 4]);
                        break;

                case QUNIFORM_TMU_CONFIG_P0:
                        write_tmu_p0(job, &uniforms, texstate, data);
                        break;

                case QUNIFORM_TMU_CONFIG_P1:
                        write_tmu_p1(job, &uniforms, texstate, data);
                        break;

                case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                        write_image_tmu_p0(job, &uniforms,
                                           &v3d->shaderimg[stage], data);
                        break;

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_aligned_u32(&uniforms,
                                       get_texrect_scale(texstate,
                                                         uinfo->contents[i],
                                                         data));
                        break;

                case QUNIFORM_TEXTURE_WIDTH:
                case QUNIFORM_TEXTURE_HEIGHT:
                case QUNIFORM_TEXTURE_DEPTH:
                case QUNIFORM_TEXTURE_ARRAY_SIZE:
                case QUNIFORM_TEXTURE_LEVELS:
                        cl_aligned_u32(&uniforms,
                                       get_texture_size(texstate,
                                                        uinfo->contents[i],
                                                        data));
                        break;

                case QUNIFORM_IMAGE_WIDTH:
                case QUNIFORM_IMAGE_HEIGHT:
                case QUNIFORM_IMAGE_DEPTH:
                case QUNIFORM_IMAGE_ARRAY_SIZE:
                        cl_aligned_u32(&uniforms,
                                       get_image_size(&v3d->shaderimg[stage],
                                                      uinfo->contents[i],
                                                      data));
                        break;

                case QUNIFORM_LINE_WIDTH:
                        cl_aligned_f(&uniforms,
                                     v3d->rasterizer->base.line_width);
                        break;

                case QUNIFORM_AA_LINE_WIDTH:
                        cl_aligned_f(&uniforms, v3d_get_real_line_width(v3d));
                        break;

                case QUNIFORM_UBO_ADDR: {
                        uint32_t unit = v3d_unit_data_get_unit(data);
                        /* Constant buffer 0 may be a system memory pointer,
                         * in which case we want to upload a shadow copy to
                         * the GPU.
                         */
                        if (!cb->cb[unit].buffer) {
                                u_upload_data(v3d->uploader, 0,
                                              cb->cb[unit].buffer_size, 16,
                                              cb->cb[unit].user_buffer,
                                              &cb->cb[unit].buffer_offset,
                                              &cb->cb[unit].buffer);
                        }

                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d_resource(cb->cb[unit].buffer)->bo,
                                         cb->cb[unit].buffer_offset +
                                         v3d_unit_data_get_offset(data));
                        break;
                }

                case QUNIFORM_SSBO_OFFSET: {
                        struct pipe_shader_buffer *sb =
                                &v3d->ssbo[stage].sb[data];

                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d_resource(sb->buffer)->bo,
                                         sb->buffer_offset);
                        break;
                }

                case QUNIFORM_GET_SSBO_SIZE:
                        cl_aligned_u32(&uniforms,
                                       v3d->ssbo[stage].sb[data].buffer_size);
                        break;

                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                        cl_aligned_f(&uniforms,
                                     texstate->textures[data]->u.tex.first_level);
                        break;

                case QUNIFORM_SPILL_OFFSET:
                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d->prog.spill_bo, 0);
                        break;

                case QUNIFORM_SPILL_SIZE_PER_THREAD:
                        cl_aligned_u32(&uniforms,
                                       v3d->prog.spill_size_per_thread);
                        break;

                case QUNIFORM_NUM_WORK_GROUPS:
                        cl_aligned_u32(&uniforms,
                                       v3d->compute_num_workgroups[data]);
                        break;

                case QUNIFORM_WORK_GROUP_SIZE:
                        cl_aligned_u32(&uniforms,
                                       v3d->compute_workgroup_size[data]);
                        break;

                case QUNIFORM_SHARED_OFFSET:
                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d->compute_shared_memory, 0);
                        break;

                case QUNIFORM_SHARED_SIZE:
                        cl_aligned_u32(&uniforms, v3d->shared_memory);
                        break;

                case QUNIFORM_FB_LAYERS:
                        cl_aligned_u32(&uniforms, job->num_layers);
                        break;

                default:
                        unreachable("Unknown QUNIFORM");
                }
#if 0
                uint32_t written_val = *((uint32_t *)uniforms - 1);
                fprintf(stderr, "shader %p[%d]: 0x%08x / 0x%08x (%f) ",
                        shader, i, __gen_address_offset(&uniform_stream) + i * 4,
                        written_val, uif(written_val));
                vir_dump_uniform(uinfo->contents[i], data);
                fprintf(stderr, "\n");
#endif
        }

        cl_end(&job->indirect, uniforms);

        return uniform_stream;
}

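/**
 * Computes the set of V3D_DIRTY_* state flags that should trigger
 * re-emission of this shader's uniform stream, based on which quniform
 * contents it uses.
 */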
void
v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
{
        uint64_t dirty = 0;

        for (int i = 0; i < shader->prog_data.base->uniforms.count; i++) {
                switch (shader->prog_data.base->uniforms.contents[i]) {
                case QUNIFORM_CONSTANT:
                        break;
                case QUNIFORM_UNIFORM:
                case QUNIFORM_UBO_ADDR:
                        dirty |= V3D_DIRTY_CONSTBUF;
                        break;

                case QUNIFORM_VIEWPORT_X_SCALE:
                case QUNIFORM_VIEWPORT_Y_SCALE:
                case QUNIFORM_VIEWPORT_Z_OFFSET:
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        dirty |= V3D_DIRTY_VIEWPORT;
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        dirty |= V3D_DIRTY_CLIP;
                        break;

                case QUNIFORM_TMU_CONFIG_P0:
                case QUNIFORM_TMU_CONFIG_P1:
                case QUNIFORM_TEXTURE_CONFIG_P1:
                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                case QUNIFORM_TEXTURE_WIDTH:
                case QUNIFORM_TEXTURE_HEIGHT:
                case QUNIFORM_TEXTURE_DEPTH:
                case QUNIFORM_TEXTURE_ARRAY_SIZE:
                case QUNIFORM_TEXTURE_LEVELS:
                case QUNIFORM_SPILL_OFFSET:
                case QUNIFORM_SPILL_SIZE_PER_THREAD:
                        /* We could flag this on just the stage we're
                         * compiling for, but it's not passed in.
                         */
                        dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
                                 V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
                        break;

                case QUNIFORM_SSBO_OFFSET:
                case QUNIFORM_GET_SSBO_SIZE:
                        dirty |= V3D_DIRTY_SSBO;
                        break;

                case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                case QUNIFORM_IMAGE_WIDTH:
                case QUNIFORM_IMAGE_HEIGHT:
                case QUNIFORM_IMAGE_DEPTH:
                case QUNIFORM_IMAGE_ARRAY_SIZE:
                        dirty |= V3D_DIRTY_SHADER_IMAGE;
                        break;

                case QUNIFORM_LINE_WIDTH:
                case QUNIFORM_AA_LINE_WIDTH:
                        dirty |= V3D_DIRTY_RASTERIZER;
                        break;

                case QUNIFORM_NUM_WORK_GROUPS:
                case QUNIFORM_WORK_GROUP_SIZE:
                case QUNIFORM_SHARED_OFFSET:
                case QUNIFORM_SHARED_SIZE:
                        /* Compute always recalculates uniforms. */
                        break;

                case QUNIFORM_FB_LAYERS:
                        dirty |= V3D_DIRTY_FRAMEBUFFER;
                        break;

                default:
                        assert(quniform_contents_is_texture_p0(shader->prog_data.base->uniforms.contents[i]));
                        dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
                                 V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
                        break;
                }
        }

        shader->uniform_dirty_bits = dirty;
}