/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#include "r600_pipe.h"
#include "r600_public.h"
#include "r600_isa.h"
#include "r600_sfn.h"
#include "evergreen_compute.h"
#include "r600d.h"

#include <errno.h>
#include "pipe/p_shader_tokens.h"
#include "util/u_debug.h"
#include "util/u_endian.h"
#include "util/u_memory.h"
#include "util/u_screen.h"
#include "util/u_simple_shaders.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"
#include "radeon_uvd.h"
#include "util/os_time.h"

static const struct debug_named_value r600_debug_options[] = {
	/* features */
	{ "nocpdma", DBG_NO_CP_DMA, "Disable CP DMA" },

	DEBUG_NAMED_VALUE_END /* must be last */
};

/*
 * pipe_context
 */

static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;
	unsigned sh, i;

	r600_isa_destroy(rctx->isa);

	for (sh = 0; sh < (rctx->b.gfx_level < EVERGREEN ? R600_NUM_HW_STAGES : EG_NUM_HW_STAGES); sh++) {
		r600_resource_reference(&rctx->scratch_buffers[sh].buffer, NULL);
	}
	r600_resource_reference(&rctx->dummy_cmask, NULL);
	r600_resource_reference(&rctx->dummy_fmask, NULL);

	if (rctx->append_fence)
		pipe_resource_reference((struct pipe_resource**)&rctx->append_fence, NULL);
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, NULL);
		free(rctx->driver_consts[sh].constants);
	}

	if (rctx->fixed_func_tcs_shader)
		rctx->b.b.delete_tcs_state(&rctx->b.b, rctx->fixed_func_tcs_shader);

	if (rctx->dummy_pixel_shader) {
		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
	}
	if (rctx->custom_dsa_flush) {
		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
	}
	if (rctx->custom_blend_resolve) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
	}
	if (rctx->custom_blend_decompress) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
	}
	if (rctx->custom_blend_fastclear) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_fastclear);
	}
	util_unreference_framebuffer_state(&rctx->framebuffer.state);

	if (rctx->gs_rings.gsvs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.gsvs_ring.buffer, NULL);

	if (rctx->gs_rings.esgs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.esgs_ring.buffer, NULL);

	for (sh = 0; sh < PIPE_SHADER_TYPES; ++sh)
		for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; ++i)
			rctx->b.b.set_constant_buffer(context, sh, i, false, NULL);

	if (rctx->blitter) {
		util_blitter_destroy(rctx->blitter);
	}
	u_suballocator_destroy(&rctx->allocator_fetch_shader);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	FREE(rctx->start_compute_cs_cmd.buf);

	r600_common_context_cleanup(&rctx->b);

	r600_resource_reference(&rctx->trace_buf, NULL);
	r600_resource_reference(&rctx->last_trace_buf, NULL);
	radeon_clear_saved_cs(&rctx->last_gfx);

	switch (rctx->b.gfx_level) {
	case EVERGREEN:
	case CAYMAN:
		for (i = 0; i < EG_MAX_ATOMIC_BUFFERS; ++i)
			pipe_resource_reference(&rctx->atomic_buffer_state.buffer[i].buffer, NULL);
		break;
	default:
		break;
	}

	FREE(rctx);
}

static struct pipe_context *r600_create_context(struct pipe_screen *screen,
						void *priv, unsigned flags)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	struct radeon_winsys *ws = rscreen->b.ws;

	if (!rctx)
		return NULL;

	rctx->b.b.screen = screen;
	assert(!priv);
	rctx->b.b.priv = NULL; /* for threaded_context_unwrap_sync */
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.set_atom_dirty = (void *)r600_set_atom_dirty;

	if (!r600_common_context_init(&rctx->b, &rscreen->b, flags))
		goto fail;

	rctx->screen = rscreen;
	list_inithead(&rctx->texture_buffers);

	r600_init_blit_functions(rctx);

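	/* Prefer the UVD hardware decoder when the kernel exposes UVD queues;
	 * otherwise fall back to the generic shader-based VL decoder. */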
	if (rscreen->b.info.ip[AMD_IP_UVD].num_queues) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

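	/* R600_TRACE marks this as a debug context: the last submitted command
	 * stream and a trace buffer are kept around for post-hang inspection. */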
	if (getenv("R600_TRACE"))
		rctx->is_debug = true;
	r600_init_common_state_functions(rctx);

	switch (rctx->b.gfx_level) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.gfx_level == R700 ? r700_create_resolve_blend(rctx)
									: r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);

		rctx->append_fence = pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							PIPE_USAGE_DEFAULT, 32);
		break;
	default:
		R600_ERR("Unsupported gfx level %d.\n", rctx->b.gfx_level);
		goto fail;
	}

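	/* Create the gfx command stream and hook up the flush callback. */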
	ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, AMD_IP_GFX,
		      r600_context_gfx_flush, rctx);
	rctx->b.gfx.flush = r600_context_gfx_flush;

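	/* Vertex fetch shaders are tiny, so sub-allocate them from a shared
	 * 64 KiB buffer instead of creating one buffer object per shader. */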
	u_suballocator_init(&rctx->allocator_fetch_shader, &rctx->b.b, 64 * 1024,
			    0, PIPE_USAGE_DEFAULT, 0, false);

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx->b.gfx_level, rctx->isa))
		goto fail;

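	/* Debug option: force resource_copy_region through the async DMA path. */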
	if (rscreen->b.debug_flags & DBG_FORCE_DMA)
		rctx->b.b.resource_copy_region = rctx->b.dma_copy;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);

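	/* Create and bind a trivial fragment shader so the context always has
	 * a valid pixel shader bound. */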
	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}

/*
 * pipe_screen
 */

static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->b.family;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
	case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
	case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_VS_INSTANCEID:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_TEXTURE_MULTISAMPLE:
	case PIPE_CAP_VS_WINDOW_SPACE_POSITION:
	case PIPE_CAP_VS_LAYER_VIEWPORT:
	case PIPE_CAP_SAMPLE_SHADING:
	case PIPE_CAP_MEMOBJ:
	case PIPE_CAP_CLIP_HALFZ:
	case PIPE_CAP_POLYGON_OFFSET_CLAMP:
	case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
	case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
	case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
	case PIPE_CAP_TEXTURE_QUERY_SAMPLES:
	case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
	case PIPE_CAP_INVALIDATE_BUFFER:
	case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
	case PIPE_CAP_QUERY_MEMORY_INFO:
	case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
	case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
	case PIPE_CAP_LEGACY_MATH_RULES:
	case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
	case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
	case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
		return 1;

	case PIPE_CAP_NIR_ATOMICS_AS_DEREF:
	case PIPE_CAP_GL_SPIRV:
		return 1;

	case PIPE_CAP_TEXTURE_TRANSFER_MODES:
		return PIPE_TEXTURE_TRANSFER_BLIT;

	case PIPE_CAP_SHAREABLE_SHADERS:
		return 0;

	case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
		/* Optimal number for good TexSubImage performance on Polaris10. */
		return 64 * 1024 * 1024;

	case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
		return 1;

	case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
		return !UTIL_ARCH_BIG_ENDIAN && rscreen->b.info.has_userptr;

	case PIPE_CAP_COMPUTE:
		return rscreen->b.gfx_level > R700;

	case PIPE_CAP_TGSI_TEXCOORD:
		return 1;

	case PIPE_CAP_NIR_IMAGES_AS_DEREF:
	case PIPE_CAP_FAKE_SW_MSAA:
		return 0;

	case PIPE_CAP_MAX_TEXEL_BUFFER_ELEMENTS_UINT:
		return MIN2(rscreen->b.info.max_heap_size_kb * 1024ull / 4, INT_MAX);

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 256;

	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		return 4;
	case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		if (family >= CHIP_CEDAR)
			return 450;
		return 330;

	/* Supported except the original R600. */
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
		/* R600 doesn't support per-MRT blends */
		return family == CHIP_R600 ? 0 : 1;

	/* Supported on Evergreen. */
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_CUBE_MAP_ARRAY:
	case PIPE_CAP_TEXTURE_GATHER_SM5:
	case PIPE_CAP_TEXTURE_QUERY_LOD:
	case PIPE_CAP_FS_FINE_DERIVATIVE:
	case PIPE_CAP_SAMPLER_VIEW_TARGET:
	case PIPE_CAP_SHADER_PACK_HALF_FLOAT:
	case PIPE_CAP_SHADER_CLOCK:
	case PIPE_CAP_SHADER_ARRAY_COMPONENTS:
	case PIPE_CAP_QUERY_BUFFER_OBJECT:
	case PIPE_CAP_IMAGE_STORE_FORMATTED:
	case PIPE_CAP_ALPHA_TO_COVERAGE_DITHER_CONTROL:
		return family >= CHIP_CEDAR ? 1 : 0;
	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
		return family >= CHIP_CEDAR ? 4 : 0;
	case PIPE_CAP_DRAW_INDIRECT:
		/* kernel command checker support is also required */
		return family >= CHIP_CEDAR;

	case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
		return family >= CHIP_CEDAR ? 0 : 1;

	case PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES:
		return 8;

	case PIPE_CAP_MAX_GS_INVOCATIONS:
		return 32;

	/* shader buffer objects */
	case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
		return 1 << 27;
	case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
		return 8;

	case PIPE_CAP_INT64:
	case PIPE_CAP_DOUBLES:
		if (rscreen->b.family == CHIP_ARUBA ||
		    rscreen->b.family == CHIP_CAYMAN ||
		    rscreen->b.family == CHIP_CYPRESS ||
		    rscreen->b.family == CHIP_HEMLOCK)
			return 1;
		if (rscreen->b.family >= CHIP_CEDAR)
			return 1;
		return 0;

	case PIPE_CAP_TWO_SIDED_COLOR:
		return 0;
	case PIPE_CAP_CULL_DISTANCE:
		return 1;

	case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
		if (family >= CHIP_CEDAR)
			return 256;
		return 0;

	case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
		if (family >= CHIP_CEDAR)
			return 30;
		else
			return 0;
	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return rscreen->b.has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
	case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
		return rscreen->b.has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return 32*4;

	/* Geometry shader output. */
	case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
		return 1024;
	case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
		return 16384;
	case PIPE_CAP_MAX_VERTEX_STREAMS:
		return family >= CHIP_CEDAR ? 4 : 1;

	case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
		/* Should be 2047, but 2048 is a requirement for GL 4.4 */
		return 2048;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
		if (family >= CHIP_CEDAR)
			return 16384;
		else
			return 8192;
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		if (family >= CHIP_CEDAR)
			return 15;
		else
			return 14;
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 12;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 2048;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		/* XXX some r6xx are buggy and can only do 4 */
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return R600_MAX_VIEWPORTS;
	case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
	case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
		return 8;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_QUERY_TIME_ELAPSED:
	case PIPE_CAP_QUERY_TIMESTAMP:
		return rscreen->b.info.clock_crystal_freq != 0;

	case PIPE_CAP_TIMER_RESOLUTION:
		/* Conversion to nanos from cycles per millisecond */
		return DIV_ROUND_UP(1000000, rscreen->b.info.clock_crystal_freq);

	case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;

	case PIPE_CAP_MAX_VARYINGS:
		return 32;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;
	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;

	case PIPE_CAP_VENDOR_ID:
		return ATI_VENDOR_ID;
	case PIPE_CAP_DEVICE_ID:
		return rscreen->b.info.pci_id;
	case PIPE_CAP_ACCELERATED:
		return 1;
	case PIPE_CAP_VIDEO_MEMORY:
		return rscreen->b.info.vram_size_kb >> 10;
	case PIPE_CAP_UMA:
		return 0;
	case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
		return rscreen->b.gfx_level >= R700;
	case PIPE_CAP_PCI_GROUP:
		return rscreen->b.info.pci.domain;
	case PIPE_CAP_PCI_BUS:
		return rscreen->b.info.pci.bus;
	case PIPE_CAP_PCI_DEVICE:
		return rscreen->b.info.pci.dev;
	case PIPE_CAP_PCI_FUNCTION:
		return rscreen->b.info.pci.func;

	case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return 8;
		return 0;
	case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return EG_MAX_ATOMIC_BUFFERS;
		return 0;

	case PIPE_CAP_VALIDATE_ALL_DIRTY_STATES:
		return 1;

	default:
		return u_pipe_screen_get_param_defaults(pscreen, param);
	}
}

static int r600_get_shader_param(struct pipe_screen* pscreen,
				 enum pipe_shader_type shader,
				 enum pipe_shader_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_TESS_CTRL:
	case PIPE_SHADER_TESS_EVAL:
	case PIPE_SHADER_COMPUTE:
		if (rscreen->b.family >= CHIP_CEDAR)
			break;
		FALLTHROUGH;
	default:
		return 0;
	}

	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 32;
	case PIPE_SHADER_CAP_MAX_INPUTS:
		return shader == PIPE_SHADER_VERTEX ? 16 : 32;
	case PIPE_SHADER_CAP_MAX_OUTPUTS:
		return shader == PIPE_SHADER_FRAGMENT ? 8 : 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
		if (shader == PIPE_SHADER_COMPUTE) {
			uint64_t max_const_buffer_size;
			pscreen->get_compute_param(pscreen, PIPE_SHADER_IR_NIR,
						   PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
						   &max_const_buffer_size);
			return MIN2(max_const_buffer_size, INT_MAX);

		} else {
			return R600_MAX_CONST_BUFFER_SIZE;
		}
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return R600_MAX_USER_CONST_BUFFERS;
	case PIPE_SHADER_CAP_CONT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		return 1;
	case PIPE_SHADER_CAP_SUBROUTINES:
	case PIPE_SHADER_CAP_INT64_ATOMICS:
	case PIPE_SHADER_CAP_FP16:
	case PIPE_SHADER_CAP_FP16_DERIVATIVES:
	case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
	case PIPE_SHADER_CAP_INT16:
	case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
		return 0;
	case PIPE_SHADER_CAP_INTEGERS:
	case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
		return 1;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
		return 16;
	case PIPE_SHADER_CAP_SUPPORTED_IRS: {
		int ir = 0;
		if (shader == PIPE_SHADER_COMPUTE)
			ir = 1 << PIPE_SHADER_IR_NATIVE;
		ir |= 1 << PIPE_SHADER_IR_NIR;
		return ir;
	}
	case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
	case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
		if (rscreen->b.family >= CHIP_CEDAR &&
		    (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE))
			return 8;
		return 0;
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return 8;
		return 0;
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
		/* having to allocate the atomics out amongst shader stages is messy,
		   so give compute 8 buffers and all the others one */
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics) {
			return EG_MAX_ATOMIC_BUFFERS;
		}
		return 0;
	}
	return 0;
}

static void r600_destroy_screen(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	if (!rscreen)
		return;

	if (!rscreen->b.ws->unref(rscreen->b.ws))
		return;

	if (rscreen->global_pool) {
		compute_memory_pool_delete(rscreen->global_pool);
	}

	r600_destroy_common_screen(&rscreen->b);
}

static struct pipe_resource *r600_resource_create(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER &&
	    (templ->bind & PIPE_BIND_GLOBAL))
		return r600_compute_global_buffer_create(screen, templ);

	return r600_resource_create_common(screen, templ);
}

struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
				       const struct pipe_screen_config *config)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (!rscreen) {
		return NULL;
	}

	/* Set functions first. */
	rscreen->b.b.context_create = r600_create_context;
	rscreen->b.b.destroy = r600_destroy_screen;
	rscreen->b.b.get_param = r600_get_param;
	rscreen->b.b.get_shader_param = r600_get_shader_param;
	rscreen->b.b.resource_create = r600_resource_create;

	if (!r600_common_screen_init(&rscreen->b, ws)) {
		FREE(rscreen);
		return NULL;
	}

	if (rscreen->b.info.gfx_level >= EVERGREEN) {
		rscreen->b.b.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->b.b.is_format_supported = r600_is_format_supported;
	}

	rscreen->b.debug_flags |= debug_get_flags_option("R600_DEBUG", r600_debug_options, 0);
	if (debug_get_bool_option("R600_DEBUG_COMPUTE", false))
		rscreen->b.debug_flags |= DBG_COMPUTE;
	if (debug_get_bool_option("R600_DUMP_SHADERS", false))
		rscreen->b.debug_flags |= DBG_ALL_SHADERS | DBG_FS;
	if (!debug_get_bool_option("R600_HYPERZ", true))
		rscreen->b.debug_flags |= DBG_NO_HYPERZ;

	if (rscreen->b.family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	rscreen->b.b.finalize_nir = r600_finalize_nir;

	rscreen->b.has_streamout = true;

	rscreen->has_msaa = true;

	/* MSAA support. */
	switch (rscreen->b.gfx_level) {
	case R600:
	case R700:
		rscreen->has_compressed_msaa_texturing = false;
		break;
	case EVERGREEN:
		rscreen->has_compressed_msaa_texturing = true;
		break;
	case CAYMAN:
		rscreen->has_compressed_msaa_texturing = true;
		break;
	default:
		rscreen->has_compressed_msaa_texturing = false;
	}

	rscreen->b.has_cp_dma = !(rscreen->b.debug_flags & DBG_NO_CP_DMA);

	rscreen->b.barrier_flags.cp_to_L2 =
		R600_CONTEXT_INV_VERTEX_CACHE |
		R600_CONTEXT_INV_TEX_CACHE |
		R600_CONTEXT_INV_CONST_CACHE;
	rscreen->b.barrier_flags.compute_to_L2 = R600_CONTEXT_CS_PARTIAL_FLUSH | R600_CONTEXT_FLUSH_AND_INV;

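	/* Memory pool backing PIPE_BIND_GLOBAL buffers used by compute. */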
	rscreen->global_pool = compute_memory_pool_new(rscreen);

	/* Create the auxiliary context. This must be done last. */
	rscreen->b.aux_context = rscreen->b.b.context_create(&rscreen->b.b, NULL, 0);

	rscreen->has_atomics = true;
#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
	struct pipe_resource templ = {};

	templ.width0 = 4;
	templ.height0 = 2048;
	templ.depth0 = 1;
	templ.array_size = 1;
	templ.target = PIPE_TEXTURE_2D;
	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
	templ.usage = PIPE_USAGE_DEFAULT;

	struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
	unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);

	memset(map, 0, 256);

	r600_screen_clear_buffer(rscreen, &res->b.b, 4, 4, 0xCC);
	r600_screen_clear_buffer(rscreen, &res->b.b, 8, 4, 0xDD);
	r600_screen_clear_buffer(rscreen, &res->b.b, 12, 4, 0xEE);
	r600_screen_clear_buffer(rscreen, &res->b.b, 20, 4, 0xFF);
	r600_screen_clear_buffer(rscreen, &res->b.b, 32, 20, 0x87);

	ws->buffer_wait(res->buf, RADEON_USAGE_WRITE);

	int i;
	for (i = 0; i < 256; i++) {
		printf("%02X", map[i]);
		if (i % 16 == 15)
			printf("\n");
	}
#endif

	if (rscreen->b.debug_flags & DBG_TEST_DMA)
		r600_test_dma(&rscreen->b);

	r600_query_fix_enabled_rb_mask(&rscreen->b);
	return &rscreen->b.b;
}