/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "blorp_priv.h"
#include "compiler/intel_nir.h"
#include "dev/intel_device_info.h"
#include "util/u_math.h"

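/* Map a BLORP operation onto the corresponding INTEL_SNAPSHOT_* type so the
 * intel_measure instrumentation can label BLORP work. The table relies on
 * the BLORP_OP_* and INTEL_SNAPSHOT_* enumerants sharing names.
 */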
enum intel_measure_snapshot_type
blorp_op_to_intel_measure_snapshot(enum blorp_op op)
{
   enum intel_measure_snapshot_type vals[] = {
#define MAP(name) [BLORP_OP_##name] = INTEL_SNAPSHOT_##name
      MAP(BLIT),
      MAP(COPY),
      MAP(CCS_AMBIGUATE),
      MAP(CCS_COLOR_CLEAR),
      MAP(CCS_PARTIAL_RESOLVE),
      MAP(CCS_RESOLVE),
      MAP(HIZ_AMBIGUATE),
      MAP(HIZ_CLEAR),
      MAP(HIZ_RESOLVE),
      MAP(MCS_AMBIGUATE),
      MAP(MCS_COLOR_CLEAR),
      MAP(MCS_PARTIAL_RESOLVE),
      MAP(SLOW_COLOR_CLEAR),
      MAP(SLOW_DEPTH_CLEAR),
#undef MAP
   };
   assert(op < ARRAY_SIZE(vals));

   return vals[op];
}

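/* Return the name of a BLORP operation as a string, e.g. "BLIT" for
 * BLORP_OP_BLIT.
 */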
const char *blorp_op_to_name(enum blorp_op op)
{
   const char *names[] = {
#define MAP(name) [BLORP_OP_##name] = #name
      MAP(BLIT),
      MAP(COPY),
      MAP(CCS_AMBIGUATE),
      MAP(CCS_COLOR_CLEAR),
      MAP(CCS_PARTIAL_RESOLVE),
      MAP(CCS_RESOLVE),
      MAP(HIZ_AMBIGUATE),
      MAP(HIZ_CLEAR),
      MAP(HIZ_RESOLVE),
      MAP(MCS_AMBIGUATE),
      MAP(MCS_COLOR_CLEAR),
      MAP(MCS_PARTIAL_RESOLVE),
      MAP(SLOW_COLOR_CLEAR),
      MAP(SLOW_DEPTH_CLEAR),
#undef MAP
   };
   assert(op < ARRAY_SIZE(names));

   return names[op];
}

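/* Return the debug name used for BLORP shaders of the given type, e.g.
 * "BLORP-blit" for BLORP_SHADER_TYPE_BLIT.
 */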
const char *
blorp_shader_type_to_name(enum blorp_shader_type type)
{
   static const char *shader_name[] = {
      [BLORP_SHADER_TYPE_COPY] = "BLORP-copy",
      [BLORP_SHADER_TYPE_BLIT] = "BLORP-blit",
      [BLORP_SHADER_TYPE_CLEAR] = "BLORP-clear",
      [BLORP_SHADER_TYPE_MCS_PARTIAL_RESOLVE] = "BLORP-mcs-partial-resolve",
      [BLORP_SHADER_TYPE_LAYER_OFFSET_VS] = "BLORP-layer-offset-vs",
      [BLORP_SHADER_TYPE_GFX4_SF] = "BLORP-gfx4-sf",
   };
   assert(type < ARRAY_SIZE(shader_name));

   return shader_name[type];
}

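/* Return the name of the pipeline a BLORP shader targets: "render" for the
 * 3D pipeline or "compute" for the compute pipeline.
 */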
const char *
blorp_shader_pipeline_to_name(enum blorp_shader_pipeline pipe)
{
   static const char *pipeline_name[] = {
      [BLORP_SHADER_PIPELINE_RENDER] = "render",
      [BLORP_SHADER_PIPELINE_COMPUTE] = "compute",
   };
   assert(pipe < ARRAY_SIZE(pipeline_name));

   return pipeline_name[pipe];
}

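/* Initialize a blorp_context: zero the structure, store the driver context
 * and ISL device pointers, copy the optional config, and allocate a fresh
 * ralloc'ed blorp_compiler. The driver is expected to fill in its callback
 * hooks (such as the exec hook used in blorp_hiz_op() below) after this
 * call, and blorp_finish() must be called to release the compiler
 * allocation.
 *
 * Illustrative call sequence; my_driver_ctx and my_isl_dev are hypothetical
 * driver objects:
 *
 *    struct blorp_context blorp;
 *    blorp_init(&blorp, my_driver_ctx, &my_isl_dev, NULL);
 *    ...                          // set driver callbacks, record BLORP ops
 *    blorp_finish(&blorp);
 */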
void
blorp_init(struct blorp_context *blorp, void *driver_ctx,
           struct isl_device *isl_dev, const struct blorp_config *config)
{
   memset(blorp, 0, sizeof(*blorp));

   blorp->driver_ctx = driver_ctx;
   blorp->isl_dev = isl_dev;
   if (config)
      blorp->config = *config;

   blorp->compiler = rzalloc(NULL, struct blorp_compiler);
}

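/* Tear down a blorp_context: free the compiler allocation made in
 * blorp_init() and drop the reference to the driver context.
 */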
void
blorp_finish(struct blorp_context *blorp)
{
   ralloc_free(blorp->compiler);
   blorp->driver_ctx = NULL;
}

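/* Begin a BLORP batch. This only records the context, the driver's
 * batch/command-buffer object, and the BLORP_BATCH_* flags (e.g.
 * BLORP_BATCH_USE_COMPUTE); no commands are emitted here.
 */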
void
blorp_batch_init(struct blorp_context *blorp,
                 struct blorp_batch *batch, void *driver_batch,
                 enum blorp_batch_flags flags)
{
   batch->blorp = blorp;
   batch->driver_batch = driver_batch;
   batch->flags = flags;
}

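/* End a BLORP batch. Only the back-pointer to the context is cleared; the
 * driver batch itself remains owned and submitted by the driver.
 */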
void
blorp_batch_finish(struct blorp_batch *batch)
{
   batch->blorp = NULL;
}

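/* Fill out a blorp_surface_info for a single miplevel/layer of a blorp_surf.
 * This snapshots the ISL surface (and aux surface, if any), builds an
 * isl_view restricted to the requested level, and decides whether the layer
 * is selected through the view's base_array_layer or passed through the
 * shader as a z_offset. The view usage depends on is_dest and on whether the
 * batch uses the compute pipeline.
 */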
void
blorp_surface_info_init(struct blorp_batch *batch,
                        struct blorp_surface_info *info,
                        const struct blorp_surf *surf,
                        unsigned int level, float layer,
                        enum isl_format format, bool is_dest)
{
   struct blorp_context *blorp = batch->blorp;
   memset(info, 0, sizeof(*info));
   assert(level < surf->surf->levels);
   assert(layer < MAX2(surf->surf->logical_level0_px.depth >> level,
                       surf->surf->logical_level0_px.array_len));

   info->enabled = true;

   if (format == ISL_FORMAT_UNSUPPORTED)
      format = surf->surf->format;

   info->surf = *surf->surf;
   info->addr = surf->addr;

   info->aux_usage = surf->aux_usage;
   if (info->aux_usage != ISL_AUX_USAGE_NONE) {
      info->aux_surf = *surf->aux_surf;
      info->aux_addr = surf->aux_addr;
   }

   info->clear_color = surf->clear_color;
   info->clear_color_addr = surf->clear_color_addr;

   isl_surf_usage_flags_t view_usage;
   if (is_dest) {
      if (batch->flags & BLORP_BATCH_USE_COMPUTE)
         view_usage = ISL_SURF_USAGE_STORAGE_BIT;
      else
         view_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
   } else {
      view_usage = ISL_SURF_USAGE_TEXTURE_BIT;
   }

   info->view = (struct isl_view) {
      .usage = view_usage,
      .format = format,
      .base_level = level,
      .levels = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   info->view.array_len =
      MAX2(u_minify(info->surf.logical_level0_px.depth, level),
           info->surf.logical_level0_px.array_len);

   if (!is_dest &&
       (info->surf.dim == ISL_SURF_DIM_3D ||
        info->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY)) {
      /* 3-D textures don't support base_array_layer and neither do 2-D
       * multisampled textures on IVB so we need to pass it through the
       * sampler in those cases. These are also two cases where we are
       * guaranteed that we won't be doing any funny surface hacks.
       */
      info->view.base_array_layer = 0;
      info->z_offset = layer;
   } else {
      info->view.base_array_layer = layer;

      assert(info->view.array_len >= info->view.base_array_layer);
      info->view.array_len -= info->view.base_array_layer;
      info->z_offset = 0;
   }

   /* Sandy Bridge and earlier have a limit of a maximum of 512 layers for
    * layered rendering.
    */
   if (is_dest && blorp->isl_dev->info->ver <= 6)
      info->view.array_len = MIN2(info->view.array_len, 512);

   if (surf->tile_x_sa || surf->tile_y_sa) {
      /* This is only allowed on simple 2D surfaces without MSAA */
      assert(info->surf.dim == ISL_SURF_DIM_2D);
      assert(info->surf.samples == 1);
      assert(info->surf.levels == 1);
      assert(info->surf.logical_level0_px.array_len == 1);
      assert(info->aux_usage == ISL_AUX_USAGE_NONE);

      info->tile_x_sa = surf->tile_x_sa;
      info->tile_y_sa = surf->tile_y_sa;

      /* Instead of using the X/Y Offset fields in RENDER_SURFACE_STATE, we
       * place the image at the tile boundary and offset our sampling or
       * rendering. For this reason, we need to grow the image by the offset
       * to ensure that the hardware doesn't think we've gone past the edge.
       */
      info->surf.logical_level0_px.w += surf->tile_x_sa;
      info->surf.logical_level0_px.h += surf->tile_y_sa;
      info->surf.phys_level0_sa.w += surf->tile_x_sa;
      info->surf.phys_level0_sa.h += surf->tile_y_sa;
   }
}

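/* Reset a blorp_params structure to its defaults: everything zeroed except
 * that a single sample, a single draw buffer, and a single layer are
 * assumed.
 */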
void
blorp_params_init(struct blorp_params *params)
{
   memset(params, 0, sizeof(*params));
   params->num_samples = 1;
   params->num_draw_buffers = 1;
   params->num_layers = 1;
}

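/* Emit a full-surface HiZ operation (fast clear, full resolve, or ambiguate)
 * on one miplevel of a depth surface, looping over the requested layer range
 * and emitting one 8x4-aligned rectangle per layer through the driver's exec
 * hook.
 *
 * Illustrative use, with hypothetical driver objects, to HiZ-resolve the
 * first layer of level 0:
 *
 *    blorp_hiz_op(&batch, &depth_surf, 0, 0, 1, ISL_AUX_OP_FULL_RESOLVE);
 */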
void
blorp_hiz_op(struct blorp_batch *batch, struct blorp_surf *surf,
             uint32_t level, uint32_t start_layer, uint32_t num_layers,
             enum isl_aux_op op)
{
   const struct intel_device_info *devinfo = batch->blorp->isl_dev->info;

   struct blorp_params params;
   blorp_params_init(&params);

   params.hiz_op = op;
   params.full_surface_hiz_op = true;
   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      params.op = BLORP_OP_HIZ_RESOLVE;
      break;
   case ISL_AUX_OP_AMBIGUATE:
      params.op = BLORP_OP_HIZ_AMBIGUATE;
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      params.op = BLORP_OP_HIZ_CLEAR;
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   for (uint32_t a = 0; a < num_layers; a++) {
      const uint32_t layer = start_layer + a;

      blorp_surface_info_init(batch, &params.depth, surf, level,
                              layer, surf->surf->format, true);

      /* Align the rectangle primitive to 8x4 pixels.
       *
       * During fast depth clears, the emitted rectangle primitive must be
       * aligned to 8x4 pixels. From the Ivybridge PRM, Vol 2 Part 1 Section
       * 11.5.3.1 Depth Buffer Clear (and the matching section in the
       * Sandybridge PRM):
       *
       *    If Number of Multisamples is NUMSAMPLES_1, the rectangle must be
       *    aligned to an 8x4 pixel block relative to the upper left corner
       *    of the depth buffer [...]
       *
       * For hiz resolves, the rectangle must also be 8x4 aligned. Item
       * WaHizAmbiguate8x4Aligned from the Haswell workarounds page and the
       * Ivybridge simulator require the alignment.
       *
       * To be safe, let's just align the rect for all hiz operations and all
       * hardware generations.
       *
       * However, for some miptree slices of a Z24 texture, emitting an 8x4
       * aligned rectangle that covers the slice may clobber adjacent slices
       * if we strictly adhered to the texture alignments specified in the
       * PRM. The Ivybridge PRM, Section "Alignment Unit Size", states that
       * SURFACE_STATE.Surface_Horizontal_Alignment should be 4 for Z24
       * surfaces, not 8. But commit 1f112cc increased the alignment from 4 to
       * 8, which prevents the clobbering.
       */
      params.x1 = u_minify(params.depth.surf.logical_level0_px.width,
                           params.depth.view.base_level);
      params.y1 = u_minify(params.depth.surf.logical_level0_px.height,
                           params.depth.view.base_level);
      params.x1 = ALIGN(params.x1, 8);
      params.y1 = ALIGN(params.y1, 4);

      if (params.depth.view.base_level == 0) {
         /* TODO: What about MSAA? */
         params.depth.surf.logical_level0_px.width = params.x1;
         params.depth.surf.logical_level0_px.height = params.y1;
      } else if (devinfo->ver >= 8 && devinfo->ver <= 9 &&
                 op == ISL_AUX_OP_AMBIGUATE) {
         /* On some platforms, it's not enough to just adjust the clear
          * rectangle when the LOD is greater than 0.
          *
          * From the BDW and SKL PRMs, Vol 7, "Optimized Hierarchical Depth
          * Buffer Resolve":
          *
          *    The following is required when performing a hierarchical depth
          *    buffer resolve:
          *
          *    - A rectangle primitive covering the full render target must be
          *      programmed on Xmin, Ymin, Xmax, and Ymax in the
          *      3DSTATE_WM_HZ_OP command.
          *
          *    - The rectangle primitive size must be aligned to 8x4 pixels.
          *
          * And from the Clear Rectangle programming note in 3DSTATE_WM_HZ_OP
          * (Vol 2a):
          *
          *    Hence the max values must be less than or equal to: ( Surface
          *    Width » LOD ) and ( Surface Height » LOD ) for X Max and Y Max
          *    respectively.
          *
          * This means that the extent of the LOD must be naturally
          * 8x4-aligned after minification of the base LOD. Since the base LOD
          * dimensions affect the placement of smaller LODs, it's not trivial
          * (nor possible, at times) to satisfy the requirement by adjusting
          * the base LOD extent. Just assert that the caller is accessing an
          * LOD that satisfies this requirement.
          */
         assert(u_minify(params.depth.surf.logical_level0_px.width,
                         params.depth.view.base_level) == params.x1);
         assert(u_minify(params.depth.surf.logical_level0_px.height,
                         params.depth.view.base_level) == params.y1);
      }

      params.dst.surf.samples = params.depth.surf.samples;
      params.dst.surf.logical_level0_px = params.depth.surf.logical_level0_px;
      params.depth_format =
         isl_format_get_depth_format(surf->surf->format, false);
      params.num_samples = params.depth.surf.samples;

      batch->blorp->exec(batch, &params);
   }
}