1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include <xf86drm.h>
24 #include "drm-uapi/nouveau_drm.h"
25 #include <nvif/class.h>
26 #include "util/format/u_format.h"
27 #include "util/format/u_format_s3tc.h"
28 #include "util/u_screen.h"
29 #include "pipe/p_screen.h"
30
31 #include "nouveau_vp3_video.h"
32
33 #include "nv50_ir_driver.h"
34
35 #include "nvc0/nvc0_context.h"
36 #include "nvc0/nvc0_screen.h"
37
38 #include "nvc0/mme/com9097.mme.h"
39 #include "nvc0/mme/com90c0.mme.h"
40 #include "nvc0/mme/comc597.mme.h"
41
42 #include "nv50/g80_texture.xml.h"
43
44 static bool
45 nvc0_screen_is_format_supported(struct pipe_screen *pscreen,
46 enum pipe_format format,
47 enum pipe_texture_target target,
48 unsigned sample_count,
49 unsigned storage_sample_count,
50 unsigned bindings)
51 {
52 const struct util_format_description *desc = util_format_description(format);
53
54 if (sample_count > 8)
55 return false;
56 if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
57 return false;
58
59 if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
60 return false;
61
62 /* Short-circuit the rest of the logic -- this is used by the gallium frontend
63 * to determine valid MS levels in a no-attachments scenario.
64 */
65 if (format == PIPE_FORMAT_NONE && bindings & PIPE_BIND_RENDER_TARGET)
66 return true;
67
68 if ((bindings & PIPE_BIND_SAMPLER_VIEW) && (target != PIPE_BUFFER))
69 if (util_format_get_blocksizebits(format) == 3 * 32)
70 return false;
71
72 if (bindings & PIPE_BIND_LINEAR)
73 if (util_format_is_depth_or_stencil(format) ||
74 (target != PIPE_TEXTURE_1D &&
75 target != PIPE_TEXTURE_2D &&
76 target != PIPE_TEXTURE_RECT) ||
77 sample_count > 1)
78 return false;
79
80 /* Restrict ETC2 and ASTC formats here. These are only supported on GK20A
81 * and GM20B.
82 */
83 if ((desc->layout == UTIL_FORMAT_LAYOUT_ETC ||
84 desc->layout == UTIL_FORMAT_LAYOUT_ASTC) &&
85 nouveau_screen(pscreen)->device->chipset != 0x12b &&
86 nouveau_screen(pscreen)->class_3d != NVEA_3D_CLASS)
87 return false;
88
89 /* linear was validated above; shared is always supported */
90 bindings &= ~(PIPE_BIND_LINEAR |
91 PIPE_BIND_SHARED);
92
93 if (bindings & PIPE_BIND_SHADER_IMAGE) {
94 if (format == PIPE_FORMAT_B8G8R8A8_UNORM &&
95 nouveau_screen(pscreen)->class_3d < NVE4_3D_CLASS) {
96 /* This should work on Fermi, but for currently unknown reasons it
97 * does not and breaks reads from PBOs. */
98 return false;
99 }
100 }
101
102 if (bindings & PIPE_BIND_INDEX_BUFFER) {
103 if (format != PIPE_FORMAT_R8_UINT &&
104 format != PIPE_FORMAT_R16_UINT &&
105 format != PIPE_FORMAT_R32_UINT)
106 return false;
107 bindings &= ~PIPE_BIND_INDEX_BUFFER;
108 }
109
110 return (( nvc0_format_table[format].usage |
111 nvc0_vertex_format[format].usage) & bindings) == bindings;
112 }
113
114 static int
115 nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
116 {
117 const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
118 const struct nouveau_screen *screen = nouveau_screen(pscreen);
119 struct nouveau_device *dev = screen->device;
120
121 switch (param) {
122 /* non-boolean caps */
123 case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
124 return 16384;
125 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
126 return 15;
127 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
128 return 12;
129 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
130 return 2048;
131 case PIPE_CAP_MIN_TEXEL_OFFSET:
132 return -8;
133 case PIPE_CAP_MAX_TEXEL_OFFSET:
134 return 7;
135 case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
136 return -32;
137 case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
138 return 31;
139 case PIPE_CAP_MAX_TEXEL_BUFFER_ELEMENTS_UINT:
140 return 128 * 1024 * 1024;
141 case PIPE_CAP_GLSL_FEATURE_LEVEL:
142 return 430;
143 case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
144 return 430;
145 case PIPE_CAP_MAX_RENDER_TARGETS:
146 return 8;
147 case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
148 return 1;
149 case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
150 case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
151 return 8;
152 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
153 return 4;
154 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
155 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
156 return 128;
157 case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
158 case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
159 return 1024;
160 case PIPE_CAP_MAX_VERTEX_STREAMS:
161 return 4;
162 case PIPE_CAP_MAX_GS_INVOCATIONS:
163 return 32;
164 case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
165 return 1 << 27;
166 case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
167 return 2048;
168 case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET:
169 return 2047;
170 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
171 return 256;
172 case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
173 if (class_3d < GM107_3D_CLASS)
174 return 256; /* IMAGE bindings require alignment to 256 */
175 return 16;
176 case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
177 return 16;
178 case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
179 return NOUVEAU_MIN_BUFFER_MAP_ALIGN;
180 case PIPE_CAP_MAX_VIEWPORTS:
181 return NVC0_MAX_VIEWPORTS;
182 case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
183 return 4;
184 case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
185 return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50;
186 case PIPE_CAP_ENDIANNESS:
187 return PIPE_ENDIAN_LITTLE;
188 case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
189 return 30;
190 case PIPE_CAP_MAX_WINDOW_RECTANGLES:
191 return NVC0_MAX_WINDOW_RECTANGLES;
192 case PIPE_CAP_MAX_CONSERVATIVE_RASTER_SUBPIXEL_PRECISION_BIAS:
193 return class_3d >= GM200_3D_CLASS ? 8 : 0;
194 case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
195 return 64 * 1024 * 1024;
196 case PIPE_CAP_MAX_VARYINGS:
197 /* NOTE: These only count our slots for GENERIC varyings.
198 * The address space may be larger, but the actual hard limit seems to be
199 * less than what the address space layout permits, so don't add TEXCOORD,
200 * COLOR, etc. here.
201 */
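   /* 0x1f0 / 16 = 31 GENERIC vec4 slots (each slot occupies 16 bytes in the
    * varying address map).
    */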
202 return 0x1f0 / 16;
203 case PIPE_CAP_MAX_VERTEX_BUFFERS:
204 return 16;
205 case PIPE_CAP_GL_BEGIN_END_BUFFER_SIZE:
206 return 512 * 1024; /* TODO: Investigate tuning this */
207 case PIPE_CAP_MAX_TEXTURE_MB:
208 return 0; /* TODO: use 1/2 of VRAM for this? */
209
210 case PIPE_CAP_TIMER_RESOLUTION:
211 return 1000;
212
213 case PIPE_CAP_SUPPORTED_PRIM_MODES_WITH_RESTART:
214 case PIPE_CAP_SUPPORTED_PRIM_MODES:
215 return BITFIELD_MASK(MESA_PRIM_COUNT);
216
217 /* supported caps */
218 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
219 case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
220 case PIPE_CAP_TEXTURE_SWIZZLE:
221 case PIPE_CAP_NPOT_TEXTURES:
222 case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
223 case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
224 case PIPE_CAP_ANISOTROPIC_FILTER:
225 case PIPE_CAP_SEAMLESS_CUBE_MAP:
226 case PIPE_CAP_CUBE_MAP_ARRAY:
227 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
228 case PIPE_CAP_TEXTURE_MULTISAMPLE:
229 case PIPE_CAP_DEPTH_CLIP_DISABLE:
230 case PIPE_CAP_TGSI_TEXCOORD:
231 case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
232 case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
233 case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
234 case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
235 case PIPE_CAP_VERTEX_COLOR_CLAMPED:
236 case PIPE_CAP_QUERY_TIMESTAMP:
237 case PIPE_CAP_QUERY_TIME_ELAPSED:
238 case PIPE_CAP_OCCLUSION_QUERY:
239 case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
240 case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
241 case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
242 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
243 case PIPE_CAP_INDEP_BLEND_ENABLE:
244 case PIPE_CAP_INDEP_BLEND_FUNC:
245 case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
246 case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
247 case PIPE_CAP_PRIMITIVE_RESTART:
248 case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
249 case PIPE_CAP_VS_INSTANCEID:
250 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
251 case PIPE_CAP_CONDITIONAL_RENDER:
252 case PIPE_CAP_TEXTURE_BARRIER:
253 case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
254 case PIPE_CAP_START_INSTANCE:
255 case PIPE_CAP_DRAW_INDIRECT:
256 case PIPE_CAP_USER_VERTEX_BUFFERS:
257 case PIPE_CAP_TEXTURE_QUERY_LOD:
258 case PIPE_CAP_SAMPLE_SHADING:
259 case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
260 case PIPE_CAP_TEXTURE_GATHER_SM5:
261 case PIPE_CAP_FS_FINE_DERIVATIVE:
262 case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
263 case PIPE_CAP_SAMPLER_VIEW_TARGET:
264 case PIPE_CAP_CLIP_HALFZ:
265 case PIPE_CAP_POLYGON_OFFSET_CLAMP:
266 case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
267 case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
268 case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
269 case PIPE_CAP_DEPTH_BOUNDS_TEST:
270 case PIPE_CAP_TEXTURE_QUERY_SAMPLES:
271 case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
272 case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
273 case PIPE_CAP_DRAW_PARAMETERS:
274 case PIPE_CAP_SHADER_PACK_HALF_FLOAT:
275 case PIPE_CAP_MULTI_DRAW_INDIRECT:
276 case PIPE_CAP_MEMOBJ:
277 case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
278 case PIPE_CAP_FS_FACE_IS_INTEGER_SYSVAL:
279 case PIPE_CAP_QUERY_BUFFER_OBJECT:
280 case PIPE_CAP_INVALIDATE_BUFFER:
281 case PIPE_CAP_STRING_MARKER:
282 case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
283 case PIPE_CAP_CULL_DISTANCE:
284 case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
285 case PIPE_CAP_SHADER_GROUP_VOTE:
286 case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
287 case PIPE_CAP_SHADER_ARRAY_COMPONENTS:
288 case PIPE_CAP_LEGACY_MATH_RULES:
289 case PIPE_CAP_DOUBLES:
290 case PIPE_CAP_INT64:
291 case PIPE_CAP_TGSI_TEX_TXF_LZ:
292 case PIPE_CAP_SHADER_CLOCK:
293 case PIPE_CAP_COMPUTE:
294 case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
295 case PIPE_CAP_QUERY_SO_OVERFLOW:
296 case PIPE_CAP_TGSI_DIV:
297 case PIPE_CAP_IMAGE_ATOMIC_INC_WRAP:
298 case PIPE_CAP_DEMOTE_TO_HELPER_INVOCATION:
299 case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
300 case PIPE_CAP_TEXTURE_SHADOW_LOD:
301 case PIPE_CAP_CLEAR_SCISSORED:
302 case PIPE_CAP_IMAGE_STORE_FORMATTED:
303 case PIPE_CAP_QUERY_MEMORY_INFO:
304 return 1;
305 case PIPE_CAP_TEXTURE_TRANSFER_MODES:
306 return nouveau_screen(pscreen)->vram_domain & NOUVEAU_BO_VRAM ? PIPE_TEXTURE_TRANSFER_BLIT : 0;
307 case PIPE_CAP_FBFETCH:
308 return class_3d >= NVE4_3D_CLASS ? 1 : 0; /* needs testing on fermi */
309 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
310 case PIPE_CAP_SHADER_BALLOT:
311 return class_3d >= NVE4_3D_CLASS;
312 case PIPE_CAP_BINDLESS_TEXTURE:
313 return class_3d >= NVE4_3D_CLASS;
314 case PIPE_CAP_IMAGE_ATOMIC_FLOAT_ADD:
315 return class_3d < GM107_3D_CLASS; /* needs additional lowering */
316 case PIPE_CAP_POLYGON_MODE_FILL_RECTANGLE:
317 case PIPE_CAP_VS_LAYER_VIEWPORT:
318 case PIPE_CAP_TES_LAYER_VIEWPORT:
319 case PIPE_CAP_POST_DEPTH_COVERAGE:
320 case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_TRIANGLES:
321 case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_POINTS_LINES:
322 case PIPE_CAP_CONSERVATIVE_RASTER_POST_DEPTH_COVERAGE:
323 case PIPE_CAP_PROGRAMMABLE_SAMPLE_LOCATIONS:
324 case PIPE_CAP_VIEWPORT_SWIZZLE:
325 case PIPE_CAP_VIEWPORT_MASK:
326 case PIPE_CAP_SAMPLER_REDUCTION_MINMAX:
327 return class_3d >= GM200_3D_CLASS;
328 case PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_TRIANGLES:
329 return class_3d >= GP100_3D_CLASS;
330 case PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY:
331 case PIPE_CAP_SYSTEM_SVM:
332 return screen->has_svm ? 1 : 0;
333
334 case PIPE_CAP_GL_SPIRV:
335 case PIPE_CAP_GL_SPIRV_VARIABLE_POINTERS:
336 return 1;
337
338 /* nir related caps */
339 case PIPE_CAP_NIR_IMAGES_AS_DEREF:
340 return 0;
341
342 case PIPE_CAP_PCI_GROUP:
343 return dev->info.pci.domain;
344 case PIPE_CAP_PCI_BUS:
345 return dev->info.pci.bus;
346 case PIPE_CAP_PCI_DEVICE:
347 return dev->info.pci.dev;
348 case PIPE_CAP_PCI_FUNCTION:
349 return dev->info.pci.func;
350
351 case PIPE_CAP_OPENCL_INTEGER_FUNCTIONS: /* could be done */
352 case PIPE_CAP_INTEGER_MULTIPLY_32X16: /* could be done */
353 case PIPE_CAP_MAP_UNSYNCHRONIZED_THREAD_SAFE: /* when we fix MT stuff */
354 case PIPE_CAP_ALPHA_TO_COVERAGE_DITHER_CONTROL: /* TODO */
355 case PIPE_CAP_SHADER_ATOMIC_INT64: /* TODO */
356 case PIPE_CAP_HARDWARE_GL_SELECT:
357 return 0;
358
359 case PIPE_CAP_VENDOR_ID:
360 return 0x10de;
361 case PIPE_CAP_DEVICE_ID:
362 return dev->info.device_id;
363 case PIPE_CAP_ACCELERATED:
364 return 1;
365 case PIPE_CAP_VIDEO_MEMORY:
366 return dev->vram_size >> 20;
367 case PIPE_CAP_UMA:
368 return nouveau_screen(pscreen)->is_uma;
369
370 default:
371 return u_pipe_screen_get_param_defaults(pscreen, param);
372 }
373 }
374
375 static int
376 nvc0_screen_get_shader_param(struct pipe_screen *pscreen,
377 enum pipe_shader_type shader,
378 enum pipe_shader_cap param)
379 {
380 const struct nouveau_screen *screen = nouveau_screen(pscreen);
381 const uint16_t class_3d = screen->class_3d;
382
383 switch (shader) {
384 case PIPE_SHADER_VERTEX:
385 case PIPE_SHADER_GEOMETRY:
386 case PIPE_SHADER_FRAGMENT:
387 case PIPE_SHADER_COMPUTE:
388 case PIPE_SHADER_TESS_CTRL:
389 case PIPE_SHADER_TESS_EVAL:
390 break;
391 default:
392 return 0;
393 }
394
395 switch (param) {
396 case PIPE_SHADER_CAP_SUPPORTED_IRS: {
397 uint32_t irs = 1 << PIPE_SHADER_IR_NIR;
398 if (screen->force_enable_cl)
399 irs |= 1 << PIPE_SHADER_IR_NIR_SERIALIZED;
400 return irs;
401 }
402 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
403 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
404 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
405 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
406 return 16384;
407 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
408 return 16;
409 case PIPE_SHADER_CAP_MAX_INPUTS:
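      /* 0x200 / 16 = 32 vec4 input slots (16 bytes per slot in the input
       * address map). */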
410 return 0x200 / 16;
411 case PIPE_SHADER_CAP_MAX_OUTPUTS:
412 return 32;
413 case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
414 return NVC0_MAX_CONSTBUF_SIZE;
415 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
416 return NVC0_MAX_PIPE_CONSTBUFS;
417 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
418 return shader != PIPE_SHADER_FRAGMENT;
419 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
420 /* The HW doesn't support indirect addressing of fragment program inputs
421 * on Volta. The binary driver works around this by generating a function
422 * that handles every possible indirection and calling it indirectly
423 * instead.
424 */
425 if (class_3d >= GV100_3D_CLASS)
426 return shader != PIPE_SHADER_FRAGMENT;
427 return 1;
428 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
429 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
430 return 1;
431 case PIPE_SHADER_CAP_MAX_TEMPS:
432 return NVC0_CAP_MAX_PROGRAM_TEMPS;
433 case PIPE_SHADER_CAP_CONT_SUPPORTED:
434 return 1;
435 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
436 return 1;
437 case PIPE_SHADER_CAP_SUBROUTINES:
438 return 1;
439 case PIPE_SHADER_CAP_INTEGERS:
440 return 1;
441 case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
442 case PIPE_SHADER_CAP_INT64_ATOMICS:
443 case PIPE_SHADER_CAP_FP16:
444 case PIPE_SHADER_CAP_FP16_DERIVATIVES:
445 case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
446 case PIPE_SHADER_CAP_INT16:
447 case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
448 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
449 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
450 return 0;
451 case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
452 return NVC0_MAX_BUFFERS;
453 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
454 return (class_3d >= NVE4_3D_CLASS) ? 32 : 16;
455 case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
456 return (class_3d >= NVE4_3D_CLASS) ? 32 : 16;
457 case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
458 if (class_3d >= NVE4_3D_CLASS)
459 return NVC0_MAX_IMAGES;
460 if (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE)
461 return NVC0_MAX_IMAGES;
462 return 0;
463 default:
464 NOUVEAU_ERR("unknown PIPE_SHADER_CAP %d\n", param);
465 return 0;
466 }
467 }
468
469 static float
470 nvc0_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
471 {
472 const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
473
474 switch (param) {
475 case PIPE_CAPF_MIN_LINE_WIDTH:
476 case PIPE_CAPF_MIN_LINE_WIDTH_AA:
477 case PIPE_CAPF_MIN_POINT_SIZE:
478 case PIPE_CAPF_MIN_POINT_SIZE_AA:
479 return 1;
480 case PIPE_CAPF_POINT_SIZE_GRANULARITY:
481 case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
482 return 0.1;
483 case PIPE_CAPF_MAX_LINE_WIDTH:
484 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
485 return 10.0f;
486 case PIPE_CAPF_MAX_POINT_SIZE:
487 return 63.0f;
488 case PIPE_CAPF_MAX_POINT_SIZE_AA:
489 return 63.375f;
490 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
491 return 16.0f;
492 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
493 return 15.0f;
494 case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
495 return 0.0f;
496 case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
497 return class_3d >= GM200_3D_CLASS ? 0.75f : 0.0f;
498 case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
499 return class_3d >= GM200_3D_CLASS ? 0.25f : 0.0f;
500 }
501
502 NOUVEAU_ERR("unknown PIPE_CAPF %d\n", param);
503 return 0.0f;
504 }
505
506 static int
507 nvc0_screen_get_compute_param(struct pipe_screen *pscreen,
508 enum pipe_shader_ir ir_type,
509 enum pipe_compute_cap param, void *data)
510 {
511 struct nvc0_screen *screen = nvc0_screen(pscreen);
512 struct nouveau_device *dev = screen->base.device;
513 const uint16_t obj_class = screen->compute->oclass;
514
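/* RET(x) copies the requested value into the caller-provided buffer (if any)
 * and returns its size in bytes, matching the get_compute_param() contract.
 */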
515 #define RET(x) do { \
516 if (data) \
517 memcpy(data, x, sizeof(x)); \
518 return sizeof(x); \
519 } while (0)
520
521 switch (param) {
522 case PIPE_COMPUTE_CAP_GRID_DIMENSION:
523 RET((uint64_t []) { 3 });
524 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
525 if (obj_class >= NVE4_COMPUTE_CLASS) {
526 RET(((uint64_t []) { 0x7fffffff, 65535, 65535 }));
527 } else {
528 RET(((uint64_t []) { 65535, 65535, 65535 }));
529 }
530 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
531 RET(((uint64_t []) { 1024, 1024, 64 }));
532 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
533 RET((uint64_t []) { 1024 });
534 case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
535 if (obj_class >= NVE4_COMPUTE_CLASS) {
536 RET((uint64_t []) { 1024 });
537 } else {
538 RET((uint64_t []) { 512 });
539 }
540 case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE: /* g[] */
541 RET((uint64_t []) { nouveau_device_get_global_mem_size(dev) });
542 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE: /* s[] */
543 switch (obj_class) {
544 case GM200_COMPUTE_CLASS:
545 RET((uint64_t []) { 96 << 10 });
546 case GM107_COMPUTE_CLASS:
547 RET((uint64_t []) { 64 << 10 });
548 default:
549 RET((uint64_t []) { 48 << 10 });
550 }
551 case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE: /* l[] */
552 RET((uint64_t []) { 512 << 10 });
553 case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE: /* c[], arbitrary limit */
554 RET((uint64_t []) { 4096 });
555 case PIPE_COMPUTE_CAP_SUBGROUP_SIZES:
556 RET((uint32_t []) { 32 });
557 case PIPE_COMPUTE_CAP_MAX_SUBGROUPS:
558 RET((uint32_t []) { 0 });
559 case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
560 RET((uint64_t []) { nouveau_device_get_global_mem_size(dev) });
561 case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
562 RET((uint32_t []) { NVC0_MAX_IMAGES });
563 case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
564 RET((uint32_t []) { screen->mp_count_compute });
565 case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
566 RET((uint32_t []) { 512 }); /* FIXME: arbitrary limit */
567 case PIPE_COMPUTE_CAP_ADDRESS_BITS:
568 RET((uint32_t []) { 64 });
569 default:
570 return 0;
571 }
572
573 #undef RET
574 }
575
576 static void
577 nvc0_screen_get_sample_pixel_grid(struct pipe_screen *pscreen,
578 unsigned sample_count,
579 unsigned *width, unsigned *height)
580 {
581 switch (sample_count) {
582 case 0:
583 case 1:
584 /* This could be 4x4, but the GL state tracker makes it difficult to
585 * create a 1x MSAA texture, and smaller grids save CB space. */
586 *width = 2;
587 *height = 4;
588 break;
589 case 2:
590 *width = 2;
591 *height = 4;
592 break;
593 case 4:
594 *width = 2;
595 *height = 2;
596 break;
597 case 8:
598 *width = 1;
599 *height = 2;
600 break;
601 default:
602 assert(0);
603 }
604 }
605
606 static void
607 nvc0_screen_destroy(struct pipe_screen *pscreen)
608 {
609 struct nvc0_screen *screen = nvc0_screen(pscreen);
610
611 if (!nouveau_drm_screen_unref(&screen->base))
612 return;
613
614 if (screen->blitter)
615 nvc0_blitter_destroy(screen);
616 if (screen->pm.prog) {
617 screen->pm.prog->code = NULL; /* hardcoded, don't FREE */
618 nvc0_program_destroy(NULL, screen->pm.prog);
619 FREE(screen->pm.prog);
620 }
621
622 nouveau_bo_ref(NULL, &screen->text);
623 nouveau_bo_ref(NULL, &screen->uniform_bo);
624 nouveau_bo_ref(NULL, &screen->tls);
625 nouveau_bo_ref(NULL, &screen->txc);
626 nouveau_bo_ref(NULL, &screen->fence.bo);
627 nouveau_bo_ref(NULL, &screen->poly_cache);
628
629 nouveau_heap_free(&screen->lib_code);
630 nouveau_heap_destroy(&screen->text_heap);
631
632 FREE(screen->tic.entries);
633
634 nouveau_object_del(&screen->eng3d);
635 nouveau_object_del(&screen->eng2d);
636 nouveau_object_del(&screen->m2mf);
637 nouveau_object_del(&screen->copy);
638 nouveau_object_del(&screen->compute);
639 nouveau_object_del(&screen->nvsw);
640
641 nouveau_screen_fini(&screen->base);
642 simple_mtx_destroy(&screen->state_lock);
643
644 FREE(screen);
645 }
646
647 static int
648 nvc0_graph_set_macro(struct nvc0_screen *screen, uint32_t m, unsigned pos,
649 unsigned size, const uint32_t *data)
650 {
651 struct nouveau_pushbuf *push = screen->base.pushbuf;
652
653 size /= 4;
654
655 assert((pos + size) <= 0x800);
656
657 BEGIN_NVC0(push, SUBC_3D(NVC0_GRAPH_MACRO_ID), 2);
658 PUSH_DATA (push, (m - 0x3800) / 8);
659 PUSH_DATA (push, pos);
660 BEGIN_1IC0(push, SUBC_3D(NVC0_GRAPH_MACRO_UPLOAD_POS), size + 1);
661 PUSH_DATA (push, pos);
662 PUSH_DATAp(push, data, size);
663
664 return pos + size;
665 }
666
667 static int
668 tu102_graph_set_macro(struct nvc0_screen *screen, uint32_t m, unsigned pos,
669 unsigned size, const uint32_t *data)
670 {
671 struct nouveau_pushbuf *push = screen->base.pushbuf;
672
673 size /= 4;
674
675 assert((pos + size) <= 0x800);
676
677 BEGIN_NVC0(push, SUBC_3D(NVC0_GRAPH_MACRO_ID), 2);
678 PUSH_DATA (push, (m - 0x3800) / 8);
679 PUSH_DATA (push, pos);
680 BEGIN_1IC0(push, SUBC_3D(NVC0_GRAPH_MACRO_UPLOAD_POS), size + 1);
681 PUSH_DATA (push, pos);
682 PUSH_DATAp(push, data, size);
683
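   /* Turing-class MME instructions are three 32-bit words each (unlike the
    * single-word Fermi encoding), so the next free slot only advances by a
    * third of the uploaded dword count.
    */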
684 return pos + (size / 3);
685 }
686
687 static void
688 nvc0_magic_3d_init(struct nouveau_pushbuf *push, uint16_t obj_class)
689 {
690 BEGIN_NVC0(push, SUBC_3D(0x10cc), 1);
691 PUSH_DATA (push, 0xff);
692 BEGIN_NVC0(push, SUBC_3D(0x10e0), 2);
693 PUSH_DATA (push, 0xff);
694 PUSH_DATA (push, 0xff);
695 BEGIN_NVC0(push, SUBC_3D(0x10ec), 2);
696 PUSH_DATA (push, 0xff);
697 PUSH_DATA (push, 0xff);
698 if (obj_class < GV100_3D_CLASS) {
699 BEGIN_NVC0(push, SUBC_3D(0x074c), 1);
700 PUSH_DATA (push, 0x3f);
701 }
702
703 BEGIN_NVC0(push, SUBC_3D(0x16a8), 1);
704 PUSH_DATA (push, (3 << 16) | 3);
705 BEGIN_NVC0(push, SUBC_3D(0x1794), 1);
706 PUSH_DATA (push, (2 << 16) | 2);
707
708 if (obj_class < GM107_3D_CLASS) {
709 BEGIN_NVC0(push, SUBC_3D(0x12ac), 1);
710 PUSH_DATA (push, 0);
711 }
712 BEGIN_NVC0(push, SUBC_3D(0x0218), 1);
713 PUSH_DATA (push, 0x10);
714 BEGIN_NVC0(push, SUBC_3D(0x10fc), 1);
715 PUSH_DATA (push, 0x10);
716 BEGIN_NVC0(push, SUBC_3D(0x1290), 1);
717 PUSH_DATA (push, 0x10);
718 BEGIN_NVC0(push, SUBC_3D(0x12d8), 2);
719 PUSH_DATA (push, 0x10);
720 PUSH_DATA (push, 0x10);
721 BEGIN_NVC0(push, SUBC_3D(0x1140), 1);
722 PUSH_DATA (push, 0x10);
723 BEGIN_NVC0(push, SUBC_3D(0x1610), 1);
724 PUSH_DATA (push, 0xe);
725
726 BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_GEN_MODE), 1);
727 PUSH_DATA (push, NVC0_3D_VERTEX_ID_GEN_MODE_DRAW_ARRAYS_ADD_START);
728 BEGIN_NVC0(push, SUBC_3D(0x030c), 1);
729 PUSH_DATA (push, 0);
730 BEGIN_NVC0(push, SUBC_3D(0x0300), 1);
731 PUSH_DATA (push, 3);
732
733 if (obj_class < GV100_3D_CLASS) {
734 BEGIN_NVC0(push, SUBC_3D(0x02d0), 1);
735 PUSH_DATA (push, 0x3fffff);
736 }
737 BEGIN_NVC0(push, SUBC_3D(0x0fdc), 1);
738 PUSH_DATA (push, 1);
739 BEGIN_NVC0(push, SUBC_3D(0x19c0), 1);
740 PUSH_DATA (push, 1);
741
742 if (obj_class < GM107_3D_CLASS) {
743 BEGIN_NVC0(push, SUBC_3D(0x075c), 1);
744 PUSH_DATA (push, 3);
745
746 if (obj_class >= NVE4_3D_CLASS) {
747 BEGIN_NVC0(push, SUBC_3D(0x07fc), 1);
748 PUSH_DATA (push, 1);
749 }
750 }
751
752 /* TODO: find out what software methods 0x1528, 0x1280 and (on nve4) 0x02dc
753 * are supposed to do */
754 }
755
756 static void
757 nvc0_screen_fence_emit(struct pipe_context *pcontext, u32 *sequence,
758 struct nouveau_bo *wait)
759 {
760 struct nvc0_context *nvc0 = nvc0_context(pcontext);
761 struct nvc0_screen *screen = nvc0->screen;
762 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
763 struct nouveau_pushbuf_refn ref = { wait, NOUVEAU_BO_GART | NOUVEAU_BO_RDWR };
764
765 /* This must be done after the possible flush in MARK_RING. */
766 *sequence = ++screen->base.fence.sequence;
767
768 assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
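   /* Emit a QUERY_GET with the SHORT flag so that only the 32-bit sequence
    * value is written to the fence BO rather than a full query report.
    */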
769 PUSH_DATA (push, NVC0_FIFO_PKHDR_SQ(NVC0_3D(QUERY_ADDRESS_HIGH), 4));
770 PUSH_DATAh(push, screen->fence.bo->offset);
771 PUSH_DATA (push, screen->fence.bo->offset);
772 PUSH_DATA (push, *sequence);
773 PUSH_DATA (push, NVC0_3D_QUERY_GET_FENCE | NVC0_3D_QUERY_GET_SHORT |
774 (0xf << NVC0_3D_QUERY_GET_UNIT__SHIFT));
775
776 nouveau_pushbuf_refn(push, &ref, 1);
777 }
778
779 static u32
780 nvc0_screen_fence_update(struct pipe_screen *pscreen)
781 {
782 struct nvc0_screen *screen = nvc0_screen(pscreen);
783 return screen->fence.map[0];
784 }
785
786 static int
787 nvc0_screen_init_compute(struct nvc0_screen *screen)
788 {
789 const struct nouveau_mclass computes[] = {
790 { AD102_COMPUTE_CLASS, -1 },
791 { GA102_COMPUTE_CLASS, -1 },
792 { TU102_COMPUTE_CLASS, -1 },
793 { GV100_COMPUTE_CLASS, -1 },
794 { GP104_COMPUTE_CLASS, -1 },
795 { GP100_COMPUTE_CLASS, -1 },
796 { GM200_COMPUTE_CLASS, -1 },
797 { GM107_COMPUTE_CLASS, -1 },
798 { NVF0_COMPUTE_CLASS, -1 },
799 { NVE4_COMPUTE_CLASS, -1 },
800 /* In theory, GF110+ should also support NVC8_COMPUTE_CLASS, but in
801 * practice using it triggers an ILLEGAL_CLASS failure in dmesg. */
802 // { NVC8_COMPUTE_CLASS, -1 },
803 { NVC0_COMPUTE_CLASS, -1 },
804 {}
805 };
806 struct nouveau_object *chan = screen->base.channel;
807 int ret;
808
809 screen->base.base.get_compute_param = nvc0_screen_get_compute_param;
810
811 ret = nouveau_object_mclass(chan, computes);
812 if (ret < 0) {
813 NOUVEAU_ERR("No supported compute class: %d\n", ret);
814 return ret;
815 }
816
817 ret = nouveau_object_new(chan, 0xbeef00c0, computes[ret].oclass, NULL, 0, &screen->compute);
818 if (ret) {
819 NOUVEAU_ERR("Failed to allocate compute class: %d\n", ret);
820 return ret;
821 }
822
823 if (screen->compute->oclass < NVE4_COMPUTE_CLASS)
824 return nvc0_screen_compute_setup(screen, screen->base.pushbuf);
825
826 return nve4_screen_compute_setup(screen, screen->base.pushbuf);
827 }
828
829 static int
830 nvc0_screen_resize_tls_area(struct nvc0_screen *screen,
831 uint32_t lpos, uint32_t lneg, uint32_t cstack)
832 {
833 struct nouveau_bo *bo = NULL;
834 int ret;
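   /* Per-warp l[] footprint: positive plus negative local space for the 32
    * threads of a warp, plus the call/control stack.
    */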
835 uint64_t size = (lpos + lneg) * 32 + cstack;
836
837 if (size >= (1 << 20)) {
838 NOUVEAU_ERR("requested TLS size too large: 0x%"PRIx64"\n", size);
839 return -1;
840 }
841
842 size *= (screen->base.device->chipset >= 0xe0) ? 64 : 48; /* max warps */
843 size = align(size, 0x8000);
844 size *= screen->mp_count;
845
846 size = align(size, 1 << 17);
847
848 ret = nouveau_bo_new(screen->base.device, NV_VRAM_DOMAIN(&screen->base), 1 << 17, size,
849 NULL, &bo);
850 if (ret)
851 return ret;
852
853 /* Make sure that the pushbuf has acquired a reference to the old tls
854 * segment, as it may have commands that will reference it.
855 */
856 if (screen->tls)
857 PUSH_REF1(screen->base.pushbuf, screen->tls,
858 NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR);
859 nouveau_bo_ref(NULL, &screen->tls);
860 screen->tls = bo;
861 return 0;
862 }
863
864 int
865 nvc0_screen_resize_text_area(struct nvc0_screen *screen, struct nouveau_pushbuf *push,
866 uint64_t size)
867 {
868 struct nouveau_bo *bo;
869 int ret;
870
871 ret = nouveau_bo_new(screen->base.device, NV_VRAM_DOMAIN(&screen->base),
872 1 << 17, size, NULL, &bo);
873 if (ret)
874 return ret;
875
876 /* Make sure that the pushbuf has acquired a reference to the old text
877 * segment, as it may have commands that will reference it.
878 */
879 if (screen->text)
880 PUSH_REF1(screen->base.pushbuf, screen->text,
881 NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD);
882 nouveau_bo_ref(NULL, &screen->text);
883 screen->text = bo;
884
885 nouveau_heap_free(&screen->lib_code);
886 nouveau_heap_destroy(&screen->text_heap);
887
888 /*
889 * Shader storage needs a 2 KiB overallocation at the end (figure taken
890 * from NVIDIA) to avoid prefetch bugs.
891 */
892 nouveau_heap_init(&screen->text_heap, 0, size - 0x800);
893
894 /* update the code segment setup */
895 if (screen->eng3d->oclass < GV100_3D_CLASS) {
896 BEGIN_NVC0(push, NVC0_3D(CODE_ADDRESS_HIGH), 2);
897 PUSH_DATAh(push, screen->text->offset);
898 PUSH_DATA (push, screen->text->offset);
899 if (screen->compute) {
900 BEGIN_NVC0(push, NVC0_CP(CODE_ADDRESS_HIGH), 2);
901 PUSH_DATAh(push, screen->text->offset);
902 PUSH_DATA (push, screen->text->offset);
903 }
904 }
905
906 return 0;
907 }
908
909 void
910 nvc0_screen_bind_cb_3d(struct nvc0_screen *screen, struct nouveau_pushbuf *push,
911 bool *can_serialize, int stage, int index, int size, uint64_t addr)
912 {
913 assert(stage != 5);
914
915 if (screen->base.class_3d >= GM107_3D_CLASS) {
916 struct nvc0_cb_binding *binding = &screen->cb_bindings[stage][index];
917
918 // TODO: Better figure out the conditions in which this is needed
919 bool serialize = binding->addr == addr && binding->size != size;
920 if (can_serialize)
921 serialize = serialize && *can_serialize;
922 if (serialize) {
923 IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
924 if (can_serialize)
925 *can_serialize = false;
926 }
927
928 binding->addr = addr;
929 binding->size = size;
930 }
931
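   /* A negative size means "unbind": skip the address/size upload and clear
    * the valid bit in the CB_BIND method below.
    */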
932 if (size >= 0) {
933 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
934 PUSH_DATA (push, size);
935 PUSH_DATAh(push, addr);
936 PUSH_DATA (push, addr);
937 }
938 IMMED_NVC0(push, NVC0_3D(CB_BIND(stage)), (index << 4) | (size >= 0));
939 }
940
941 static const void *
942 nvc0_screen_get_compiler_options(struct pipe_screen *pscreen,
943 enum pipe_shader_ir ir,
944 enum pipe_shader_type shader)
945 {
946 struct nvc0_screen *screen = nvc0_screen(pscreen);
947 if (ir == PIPE_SHADER_IR_NIR)
948 return nv50_ir_nir_shader_compiler_options(screen->base.device->chipset, shader);
949 return NULL;
950 }
951
952 #define FAIL_SCREEN_INIT(str, err) \
953 do { \
954 NOUVEAU_ERR(str, err); \
955 goto fail; \
956 } while(0)
957
958 struct nouveau_screen *
959 nvc0_screen_create(struct nouveau_device *dev)
960 {
961 struct nvc0_screen *screen;
962 struct pipe_screen *pscreen;
963 struct nouveau_object *chan;
964
965 struct nouveau_pushbuf *push;
966 uint64_t value;
967 uint32_t flags;
968 int ret;
969 unsigned i;
970
971 switch (dev->chipset & ~0xf) {
972 case 0xc0:
973 case 0xd0:
974 case 0xe0:
975 case 0xf0:
976 case 0x100:
977 case 0x110:
978 case 0x120:
979 case 0x130:
980 case 0x140:
981 case 0x160:
982 case 0x170:
983 case 0x190:
984 break;
985 default:
986 return NULL;
987 }
988
989 screen = CALLOC_STRUCT(nvc0_screen);
990 if (!screen)
991 return NULL;
992 pscreen = &screen->base.base;
993 pscreen->destroy = nvc0_screen_destroy;
994
995 simple_mtx_init(&screen->state_lock, mtx_plain);
996
997 ret = nouveau_screen_init(&screen->base, dev);
998 if (ret)
999 FAIL_SCREEN_INIT("Base screen init failed: %d\n", ret);
1000 chan = screen->base.channel;
1001 push = screen->base.pushbuf;
1002 push->rsvd_kick = 5;
1003
1004 /* TODO: could this be higher on Kepler+? how does reclocking vs no
1005 * reclocking affect performance?
1006 * TODO: could this be higher on Fermi?
1007 */
1008 if (dev->chipset >= 0xe0)
1009 screen->base.transfer_pushbuf_threshold = 1024;
1010
1011 screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
1012 PIPE_BIND_SHADER_BUFFER |
1013 PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
1014 PIPE_BIND_COMMAND_ARGS_BUFFER | PIPE_BIND_QUERY_BUFFER;
1015 screen->base.sysmem_bindings |=
1016 PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER;
1017
1018 if (screen->base.vram_domain & NOUVEAU_BO_GART) {
1019 screen->base.sysmem_bindings |= screen->base.vidmem_bindings;
1020 screen->base.vidmem_bindings = 0;
1021 }
1022
1023 pscreen->context_create = nvc0_create;
1024 pscreen->is_format_supported = nvc0_screen_is_format_supported;
1025 pscreen->get_param = nvc0_screen_get_param;
1026 pscreen->get_shader_param = nvc0_screen_get_shader_param;
1027 pscreen->get_paramf = nvc0_screen_get_paramf;
1028 pscreen->get_sample_pixel_grid = nvc0_screen_get_sample_pixel_grid;
1029 pscreen->get_driver_query_info = nvc0_screen_get_driver_query_info;
1030 pscreen->get_driver_query_group_info = nvc0_screen_get_driver_query_group_info;
1031 /* nir stuff */
1032 pscreen->get_compiler_options = nvc0_screen_get_compiler_options;
1033
1034 nvc0_screen_init_resource_functions(pscreen);
1035
1036 screen->base.base.get_video_param = nouveau_vp3_screen_get_video_param;
1037 screen->base.base.is_video_format_supported = nouveau_vp3_screen_video_supported;
1038
1039 flags = NOUVEAU_BO_GART | NOUVEAU_BO_MAP;
1040 if (screen->base.drm->version >= 0x01000202)
1041 flags |= NOUVEAU_BO_COHERENT;
1042
1043 ret = nouveau_bo_new(dev, flags, 0, 4096, NULL, &screen->fence.bo);
1044 if (ret)
1045 FAIL_SCREEN_INIT("Error allocating fence BO: %d\n", ret);
1046 BO_MAP(&screen->base, screen->fence.bo, 0, NULL);
1047 screen->fence.map = screen->fence.bo->map;
1048 screen->base.fence.emit = nvc0_screen_fence_emit;
1049 screen->base.fence.update = nvc0_screen_fence_update;
1050
1051 if (dev->chipset < 0x140) {
1052 ret = nouveau_object_new(chan, (dev->chipset < 0xe0) ? 0x1f906e : 0x906e,
1053 NVIF_CLASS_SW_GF100, NULL, 0, &screen->nvsw);
1054 if (ret)
1055 FAIL_SCREEN_INIT("Error creating SW object: %d\n", ret);
1056
1057 BEGIN_NVC0(push, SUBC_SW(NV01_SUBCHAN_OBJECT), 1);
1058 PUSH_DATA (push, screen->nvsw->handle);
1059 }
1060
1061 const struct nouveau_mclass m2mfs[] = {
1062 { NVF0_P2MF_CLASS, -1 },
1063 { NVE4_P2MF_CLASS, -1 },
1064 { NVC0_M2MF_CLASS, -1 },
1065 {}
1066 };
1067
1068 ret = nouveau_object_mclass(chan, m2mfs);
1069 if (ret < 0)
1070 FAIL_SCREEN_INIT("No supported m2mf class: %d\n", ret);
1071
1072 ret = nouveau_object_new(chan, 0xbeef323f, m2mfs[ret].oclass, NULL, 0,
1073 &screen->m2mf);
1074 if (ret)
1075 FAIL_SCREEN_INIT("Error allocating PGRAPH context for M2MF: %d\n", ret);
1076
1077 BEGIN_NVC0(push, SUBC_M2MF(NV01_SUBCHAN_OBJECT), 1);
1078 PUSH_DATA (push, screen->m2mf->oclass);
1079
1080 if (screen->m2mf->oclass >= NVE4_P2MF_CLASS) {
1081 const struct nouveau_mclass copys[] = {
1082 { AMPERE_DMA_COPY_B, -1 },
1083 { AMPERE_DMA_COPY_A, -1 },
1084 { TURING_DMA_COPY_A, -1 },
1085 { VOLTA_DMA_COPY_A, -1 },
1086 { PASCAL_DMA_COPY_B, -1 },
1087 { PASCAL_DMA_COPY_A, -1 },
1088 { MAXWELL_DMA_COPY_A, -1 },
1089 { KEPLER_DMA_COPY_A, -1 },
1090 {}
1091 };
1092
1093 ret = nouveau_object_mclass(chan, copys);
1094 if (ret < 0)
1095 FAIL_SCREEN_INIT("No supported copy engine class: %d\n", ret);
1096
1097 ret = nouveau_object_new(chan, 0, copys[ret].oclass, NULL, 0, &screen->copy);
1098 if (ret)
1099 FAIL_SCREEN_INIT("Error allocating copy engine class: %d\n", ret);
1100
1101 BEGIN_NVC0(push, SUBC_COPY(NV01_SUBCHAN_OBJECT), 1);
1102 PUSH_DATA (push, screen->copy->oclass);
1103 }
1104
1105 ret = nouveau_object_new(chan, 0xbeef902d, NVC0_2D_CLASS, NULL, 0,
1106 &screen->eng2d);
1107 if (ret)
1108 FAIL_SCREEN_INIT("Error allocating PGRAPH context for 2D: %d\n", ret);
1109
1110 BEGIN_NVC0(push, SUBC_2D(NV01_SUBCHAN_OBJECT), 1);
1111 PUSH_DATA (push, screen->eng2d->oclass);
1112 BEGIN_NVC0(push, SUBC_2D(NVC0_2D_SINGLE_GPC), 1);
1113 PUSH_DATA (push, 0);
1114 BEGIN_NVC0(push, NVC0_2D(OPERATION), 1);
1115 PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY);
1116 BEGIN_NVC0(push, NVC0_2D(CLIP_ENABLE), 1);
1117 PUSH_DATA (push, 0);
1118 BEGIN_NVC0(push, NVC0_2D(COLOR_KEY_ENABLE), 1);
1119 PUSH_DATA (push, 0);
1120 BEGIN_NVC0(push, NVC0_2D(SET_PIXELS_FROM_MEMORY_CORRAL_SIZE), 1);
1121 PUSH_DATA (push, 0x3f);
1122 BEGIN_NVC0(push, NVC0_2D(SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP), 1);
1123 PUSH_DATA (push, 1);
1124 BEGIN_NVC0(push, NVC0_2D(COND_MODE), 1);
1125 PUSH_DATA (push, NV50_2D_COND_MODE_ALWAYS);
1126
1127 BEGIN_NVC0(push, SUBC_2D(NVC0_GRAPH_NOTIFY_ADDRESS_HIGH), 2);
1128 PUSH_DATAh(push, screen->fence.bo->offset + 16);
1129 PUSH_DATA (push, screen->fence.bo->offset + 16);
1130
1131 const struct nouveau_mclass threeds[] = {
1132 { AD102_3D_CLASS, -1 },
1133 { GA102_3D_CLASS, -1 },
1134 { TU102_3D_CLASS, -1 },
1135 { GV100_3D_CLASS, -1 },
1136 { GP102_3D_CLASS, -1 },
1137 { GP100_3D_CLASS, -1 },
1138 { GM200_3D_CLASS, -1 },
1139 { GM107_3D_CLASS, -1 },
1140 { NVF0_3D_CLASS, -1 },
1141 { NVEA_3D_CLASS, -1 },
1142 { NVE4_3D_CLASS, -1 },
1143 { NVC8_3D_CLASS, -1 },
1144 { NVC1_3D_CLASS, -1 },
1145 { NVC0_3D_CLASS, -1 },
1146 {}
1147 };
1148
1149 ret = nouveau_object_mclass(chan, threeds);
1150 if (ret < 0)
1151 FAIL_SCREEN_INIT("No supported 3d class: %d\n", ret);
1152
1153 ret = nouveau_object_new(chan, 0xbeef003d, threeds[ret].oclass, NULL, 0,
1154 &screen->eng3d);
1155 if (ret)
1156 FAIL_SCREEN_INIT("Error allocating PGRAPH context for 3D: %d\n", ret);
1157 screen->base.class_3d = screen->eng3d->oclass;
1158
1159 BEGIN_NVC0(push, SUBC_3D(NV01_SUBCHAN_OBJECT), 1);
1160 PUSH_DATA (push, screen->eng3d->oclass);
1161
1162 BEGIN_NVC0(push, NVC0_3D(COND_MODE), 1);
1163 PUSH_DATA (push, NVC0_3D_COND_MODE_ALWAYS);
1164
1165 if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
1166 /* kill shaders after about 1 second (at 100 MHz) */
1167 BEGIN_NVC0(push, NVC0_3D(WATCHDOG_TIMER), 1);
1168 PUSH_DATA (push, 0x17);
1169 }
1170
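   /* Depth and color compression are only enabled on drm module 1.1.1 or
    * newer; the same version gate is used below before querying GRAPH_UNITS.
    */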
1171 IMMED_NVC0(push, NVC0_3D(ZETA_COMP_ENABLE),
1172 screen->base.drm->version >= 0x01000101);
1173 BEGIN_NVC0(push, NVC0_3D(RT_COMP_ENABLE(0)), 8);
1174 for (i = 0; i < 8; ++i)
1175 PUSH_DATA(push, screen->base.drm->version >= 0x01000101);
1176
1177 BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
1178 PUSH_DATA (push, 1);
1179
1180 BEGIN_NVC0(push, NVC0_3D(CSAA_ENABLE), 1);
1181 PUSH_DATA (push, 0);
1182 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_ENABLE), 1);
1183 PUSH_DATA (push, 0);
1184 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_MODE), 1);
1185 PUSH_DATA (push, NVC0_3D_MULTISAMPLE_MODE_MS1);
1186 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_CTRL), 1);
1187 PUSH_DATA (push, 0);
1188 BEGIN_NVC0(push, NVC0_3D(LINE_WIDTH_SEPARATE), 1);
1189 PUSH_DATA (push, 1);
1190 BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_WITH_DRAW_ARRAYS), 1);
1191 PUSH_DATA (push, 1);
1192 BEGIN_NVC0(push, NVC0_3D(BLEND_SEPARATE_ALPHA), 1);
1193 PUSH_DATA (push, 1);
1194 BEGIN_NVC0(push, NVC0_3D(BLEND_ENABLE_COMMON), 1);
1195 PUSH_DATA (push, 0);
1196 BEGIN_NVC0(push, NVC0_3D(SHADE_MODEL), 1);
1197 PUSH_DATA (push, NVC0_3D_SHADE_MODEL_SMOOTH);
1198 if (screen->eng3d->oclass < NVE4_3D_CLASS) {
1199 IMMED_NVC0(push, NVC0_3D(TEX_MISC), 0);
1200 } else if (screen->eng3d->oclass < GA102_3D_CLASS) {
1201 BEGIN_NVC0(push, NVE4_3D(TEX_CB_INDEX), 1);
1202 PUSH_DATA (push, 15);
1203 }
1204 BEGIN_NVC0(push, NVC0_3D(CALL_LIMIT_LOG), 1);
1205 PUSH_DATA (push, 8); /* 128 */
1206 BEGIN_NVC0(push, NVC0_3D(ZCULL_STATCTRS_ENABLE), 1);
1207 PUSH_DATA (push, 1);
1208 if (screen->eng3d->oclass >= NVC1_3D_CLASS) {
1209 BEGIN_NVC0(push, NVC0_3D(CACHE_SPLIT), 1);
1210 PUSH_DATA (push, NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1);
1211 }
1212
1213 nvc0_magic_3d_init(push, screen->eng3d->oclass);
1214
1215 ret = nvc0_screen_resize_text_area(screen, push, 1 << 19);
1216 if (ret)
1217 FAIL_SCREEN_INIT("Error allocating TEXT area: %d\n", ret);
1218
1219 /* 6 user uniform areas, 6 driver areas, and 1 for the runout */
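   /* That is 13 areas of 64 KiB each, which is where the 13 << 16 size of
    * the uniform BO below comes from.
    */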
1220 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 12, 13 << 16, NULL,
1221 &screen->uniform_bo);
1222 if (ret)
1223 FAIL_SCREEN_INIT("Error allocating uniform BO: %d\n", ret);
1224
1225 PUSH_REF1 (push, screen->uniform_bo, NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_WR);
1226
1227 /* return { 0.0, 0.0, 0.0, 0.0 } for out-of-bounds vtxbuf access */
1228 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
1229 PUSH_DATA (push, 256);
1230 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1231 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1232 BEGIN_1IC0(push, NVC0_3D(CB_POS), 5);
1233 PUSH_DATA (push, 0);
1234 PUSH_DATAf(push, 0.0f);
1235 PUSH_DATAf(push, 0.0f);
1236 PUSH_DATAf(push, 0.0f);
1237 PUSH_DATAf(push, 0.0f);
1238 BEGIN_NVC0(push, NVC0_3D(VERTEX_RUNOUT_ADDRESS_HIGH), 2);
1239 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1240 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1241
1242 if (screen->base.drm->version >= 0x01000101) {
1243 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_GRAPH_UNITS, &value);
1244 if (ret)
1245 FAIL_SCREEN_INIT("NOUVEAU_GETPARAM_GRAPH_UNITS failed: %d\n", ret);
1246 } else {
1247 if (dev->chipset >= 0xe0 && dev->chipset < 0xf0)
1248 value = (8 << 8) | 4;
1249 else
1250 value = (16 << 8) | 4;
1251 }
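   /* GRAPH_UNITS packs the GPC count into the low byte and the MP (SM)
    * count into the bits above it.
    */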
1252 screen->gpc_count = value & 0x000000ff;
1253 screen->mp_count = value >> 8;
1254 screen->mp_count_compute = screen->mp_count;
1255
1256 ret = nvc0_screen_resize_tls_area(screen, 128 * 16, 0, 0x200);
1257 if (ret)
1258 FAIL_SCREEN_INIT("Error allocating TLS area: %d\n", ret);
1259
1260 BEGIN_NVC0(push, NVC0_3D(TEMP_ADDRESS_HIGH), 4);
1261 PUSH_DATAh(push, screen->tls->offset);
1262 PUSH_DATA (push, screen->tls->offset);
1263 PUSH_DATA (push, screen->tls->size >> 32);
1264 PUSH_DATA (push, screen->tls->size);
1265 BEGIN_NVC0(push, NVC0_3D(WARP_TEMP_ALLOC), 1);
1266 PUSH_DATA (push, 0);
1267 /* Reduce likelihood of collision with real buffers by placing the hole at
1268 * the top of the 4G area. This will have to be dealt with for real
1269 * eventually by blocking off that area from the VM.
1270 */
1271 BEGIN_NVC0(push, NVC0_3D(LOCAL_BASE), 1);
1272 PUSH_DATA (push, 0xff << 24);
1273
1274 if (screen->eng3d->oclass < GM107_3D_CLASS) {
1275 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 17, 1 << 20, NULL,
1276 &screen->poly_cache);
1277 if (ret)
1278 FAIL_SCREEN_INIT("Error allocating poly cache BO: %d\n", ret);
1279
1280 BEGIN_NVC0(push, NVC0_3D(VERTEX_QUARANTINE_ADDRESS_HIGH), 3);
1281 PUSH_DATAh(push, screen->poly_cache->offset);
1282 PUSH_DATA (push, screen->poly_cache->offset);
1283 PUSH_DATA (push, 3);
1284 }
1285
1286 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 17, 1 << 17, NULL,
1287 &screen->txc);
1288 if (ret)
1289 FAIL_SCREEN_INIT("Error allocating txc BO: %d\n", ret);
1290
1291 BEGIN_NVC0(push, NVC0_3D(TIC_ADDRESS_HIGH), 3);
1292 PUSH_DATAh(push, screen->txc->offset);
1293 PUSH_DATA (push, screen->txc->offset);
1294 PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
1295 if (screen->eng3d->oclass >= GM107_3D_CLASS) {
1296 screen->tic.maxwell = true;
1297 if (screen->eng3d->oclass == GM107_3D_CLASS) {
1298 screen->tic.maxwell =
1299 debug_get_bool_option("NOUVEAU_MAXWELL_TIC", true);
1300 IMMED_NVC0(push, SUBC_3D(0x0f10), screen->tic.maxwell);
1301 }
1302 }
1303
1304 BEGIN_NVC0(push, NVC0_3D(TSC_ADDRESS_HIGH), 3);
1305 PUSH_DATAh(push, screen->txc->offset + 65536);
1306 PUSH_DATA (push, screen->txc->offset + 65536);
1307 PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);
1308
1309 BEGIN_NVC0(push, NVC0_3D(SCREEN_Y_CONTROL), 1);
1310 PUSH_DATA (push, 0);
1311 BEGIN_NVC0(push, NVC0_3D(WINDOW_OFFSET_X), 2);
1312 PUSH_DATA (push, 0);
1313 PUSH_DATA (push, 0);
1314 BEGIN_NVC0(push, NVC0_3D(ZCULL_REGION), 1); /* deactivate ZCULL */
1315 PUSH_DATA (push, 0x3f);
1316
1317 BEGIN_NVC0(push, NVC0_3D(CLIP_RECTS_MODE), 1);
1318 PUSH_DATA (push, NVC0_3D_CLIP_RECTS_MODE_INSIDE_ANY);
1319 BEGIN_NVC0(push, NVC0_3D(CLIP_RECT_HORIZ(0)), 8 * 2);
1320 for (i = 0; i < 8 * 2; ++i)
1321 PUSH_DATA(push, 0);
1322 BEGIN_NVC0(push, NVC0_3D(CLIP_RECTS_EN), 1);
1323 PUSH_DATA (push, 0);
1324 BEGIN_NVC0(push, NVC0_3D(CLIPID_ENABLE), 1);
1325 PUSH_DATA (push, 0);
1326
1327 /* neither scissors, viewport nor stencil mask should affect clears */
1328 BEGIN_NVC0(push, NVC0_3D(CLEAR_FLAGS), 1);
1329 PUSH_DATA (push, 0);
1330
1331 BEGIN_NVC0(push, NVC0_3D(VIEWPORT_TRANSFORM_EN), 1);
1332 PUSH_DATA (push, 1);
1333 for (i = 0; i < NVC0_MAX_VIEWPORTS; i++) {
1334 BEGIN_NVC0(push, NVC0_3D(DEPTH_RANGE_NEAR(i)), 2);
1335 PUSH_DATAf(push, 0.0f);
1336 PUSH_DATAf(push, 1.0f);
1337 }
1338 BEGIN_NVC0(push, NVC0_3D(VIEW_VOLUME_CLIP_CTRL), 1);
1339 PUSH_DATA (push, NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1);
1340
1341 /* We use scissors instead of exact view volume clipping,
1342 * so they're always enabled.
1343 */
1344 for (i = 0; i < NVC0_MAX_VIEWPORTS; i++) {
1345 BEGIN_NVC0(push, NVC0_3D(SCISSOR_ENABLE(i)), 3);
1346 PUSH_DATA (push, 1);
1347 PUSH_DATA (push, 16384 << 16);
1348 PUSH_DATA (push, 16384 << 16);
1349 }
1350
1351 if (screen->eng3d->oclass < TU102_3D_CLASS) {
1352 #define MK_MACRO(m, n) i = nvc0_graph_set_macro(screen, m, i, sizeof(n), n);
1353
1354 i = 0;
1355 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_PER_INSTANCE, mme9097_per_instance_bf);
1356 MK_MACRO(NVC0_3D_MACRO_BLEND_ENABLES, mme9097_blend_enables);
1357 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_SELECT, mme9097_vertex_array_select);
1358 MK_MACRO(NVC0_3D_MACRO_TEP_SELECT, mme9097_tep_select);
1359 MK_MACRO(NVC0_3D_MACRO_GP_SELECT, mme9097_gp_select);
1360 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_FRONT, mme9097_poly_mode_front);
1361 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_BACK, mme9097_poly_mode_back);
1362 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT, mme9097_draw_arrays_indirect);
1363 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT, mme9097_draw_elts_indirect);
1364 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT, mme9097_draw_arrays_indirect_count);
1365 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT, mme9097_draw_elts_indirect_count);
1366 MK_MACRO(NVC0_3D_MACRO_QUERY_BUFFER_WRITE, mme9097_query_buffer_write);
1367 MK_MACRO(NVC0_3D_MACRO_CONSERVATIVE_RASTER_STATE, mme9097_conservative_raster_state);
1368 MK_MACRO(NVC0_3D_MACRO_SET_PRIV_REG, mme9097_set_priv_reg);
1369 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER, mme9097_compute_counter);
1370 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER_TO_QUERY, mme9097_compute_counter_to_query);
1371 MK_MACRO(NVC0_CP_MACRO_LAUNCH_GRID_INDIRECT, mme90c0_launch_grid_indirect);
1372 } else {
1373 #undef MK_MACRO
1374 #define MK_MACRO(m, n) i = tu102_graph_set_macro(screen, m, i, sizeof(n), n);
1375
1376 i = 0;
1377 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_PER_INSTANCE, mmec597_per_instance_bf);
1378 MK_MACRO(NVC0_3D_MACRO_BLEND_ENABLES, mmec597_blend_enables);
1379 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_SELECT, mmec597_vertex_array_select);
1380 MK_MACRO(NVC0_3D_MACRO_TEP_SELECT, mmec597_tep_select);
1381 MK_MACRO(NVC0_3D_MACRO_GP_SELECT, mmec597_gp_select);
1382 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_FRONT, mmec597_poly_mode_front);
1383 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_BACK, mmec597_poly_mode_back);
1384 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT, mmec597_draw_arrays_indirect);
1385 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT, mmec597_draw_elts_indirect);
1386 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT, mmec597_draw_arrays_indirect_count);
1387 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT, mmec597_draw_elts_indirect_count);
1388 MK_MACRO(NVC0_3D_MACRO_QUERY_BUFFER_WRITE, mmec597_query_buffer_write);
1389 MK_MACRO(NVC0_3D_MACRO_CONSERVATIVE_RASTER_STATE, mmec597_conservative_raster_state);
1390 MK_MACRO(NVC0_3D_MACRO_SET_PRIV_REG, mmec597_set_priv_reg);
1391 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER, mmec597_compute_counter);
1392 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER_TO_QUERY, mmec597_compute_counter_to_query);
1393 }
1394
1395 BEGIN_NVC0(push, NVC0_3D(RASTERIZE_ENABLE), 1);
1396 PUSH_DATA (push, 1);
1397 BEGIN_NVC0(push, NVC0_3D(RT_SEPARATE_FRAG_DATA), 1);
1398 PUSH_DATA (push, 1);
1399 BEGIN_NVC0(push, NVC0_3D(MACRO_GP_SELECT), 1);
1400 PUSH_DATA (push, 0x40);
1401 BEGIN_NVC0(push, NVC0_3D(LAYER), 1);
1402 PUSH_DATA (push, 0);
1403 BEGIN_NVC0(push, NVC0_3D(MACRO_TEP_SELECT), 1);
1404 PUSH_DATA (push, 0x30);
1405 BEGIN_NVC0(push, NVC0_3D(PATCH_VERTICES), 1);
1406 PUSH_DATA (push, 3);
1407 BEGIN_NVC0(push, NVC0_3D(SP_SELECT(2)), 1);
1408 PUSH_DATA (push, 0x20);
1409 BEGIN_NVC0(push, NVC0_3D(SP_SELECT(0)), 1);
1410 PUSH_DATA (push, 0x00);
1411 screen->save_state.patch_vertices = 3;
1412
1413 BEGIN_NVC0(push, NVC0_3D(POINT_COORD_REPLACE), 1);
1414 PUSH_DATA (push, 0);
1415 BEGIN_NVC0(push, NVC0_3D(POINT_RASTER_RULES), 1);
1416 PUSH_DATA (push, NVC0_3D_POINT_RASTER_RULES_OGL);
1417
1418 IMMED_NVC0(push, NVC0_3D(EDGEFLAG), 1);
1419
1420 if (nvc0_screen_init_compute(screen))
1421 goto fail;
1422
1423 /* XXX: Compute and 3D are somehow aliased on Fermi. */
1424 for (i = 0; i < 5; ++i) {
1425 unsigned j = 0;
1426 for (j = 0; j < 16; j++)
1427 screen->cb_bindings[i][j].size = -1;
1428
1429 /* TIC and TSC entries for each unit (nve4+ only) */
1430 /* auxiliary constants (6 user clip planes, base instance id) */
1431 nvc0_screen_bind_cb_3d(screen, push, NULL, i, 15, NVC0_CB_AUX_SIZE,
1432 screen->uniform_bo->offset + NVC0_CB_AUX_INFO(i));
1433 if (screen->eng3d->oclass >= NVE4_3D_CLASS) {
1434 unsigned j;
1435 BEGIN_1IC0(push, NVC0_3D(CB_POS), 9);
1436 PUSH_DATA (push, NVC0_CB_AUX_UNK_INFO);
1437 for (j = 0; j < 8; ++j)
1438 PUSH_DATA(push, j);
1439 } else {
1440 BEGIN_NVC0(push, NVC0_3D(TEX_LIMITS(i)), 1);
1441 PUSH_DATA (push, 0x54);
1442 }
1443
1444 /* MS sample coordinate offsets: these do not work with _ALT modes ! */
1445 BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 2 * 8);
1446 PUSH_DATA (push, NVC0_CB_AUX_MS_INFO);
1447 PUSH_DATA (push, 0); /* 0 */
1448 PUSH_DATA (push, 0);
1449 PUSH_DATA (push, 1); /* 1 */
1450 PUSH_DATA (push, 0);
1451 PUSH_DATA (push, 0); /* 2 */
1452 PUSH_DATA (push, 1);
1453 PUSH_DATA (push, 1); /* 3 */
1454 PUSH_DATA (push, 1);
1455 PUSH_DATA (push, 2); /* 4 */
1456 PUSH_DATA (push, 0);
1457 PUSH_DATA (push, 3); /* 5 */
1458 PUSH_DATA (push, 0);
1459 PUSH_DATA (push, 2); /* 6 */
1460 PUSH_DATA (push, 1);
1461 PUSH_DATA (push, 3); /* 7 */
1462 PUSH_DATA (push, 1);
1463 }
1464 BEGIN_NVC0(push, NVC0_3D(LINKED_TSC), 1);
1465 PUSH_DATA (push, 0);
1466
1467 /* requires NVIDIA-provided firmware */
1468 if (screen->eng3d->oclass >= GM200_3D_CLASS) {
1469 unsigned reg = screen->eng3d->oclass >= GV100_3D_CLASS ? 0x419ba4 : 0x419f78;
1470 BEGIN_1IC0(push, NVC0_3D(MACRO_SET_PRIV_REG), 3);
1471 PUSH_DATA (push, reg);
1472 PUSH_DATA (push, 0x00000000);
1473 PUSH_DATA (push, 0x00000008);
1474 }
1475
1476 PUSH_KICK (push);
1477
1478 screen->tic.entries = CALLOC(
1479 NVC0_TIC_MAX_ENTRIES + NVC0_TSC_MAX_ENTRIES + NVE4_IMG_MAX_HANDLES,
1480 sizeof(void *));
1481 screen->tsc.entries = screen->tic.entries + NVC0_TIC_MAX_ENTRIES;
1482 screen->img.entries = (void *)(screen->tsc.entries + NVC0_TSC_MAX_ENTRIES);
1483
1484 if (!nvc0_blitter_create(screen))
1485 goto fail;
1486
1487 nouveau_device_set_classes_for_debug(dev,
1488 screen->eng3d->oclass,
1489 screen->compute->oclass,
1490 screen->m2mf->oclass,
1491 screen->copy ? screen->copy->oclass : 0);
1492 return &screen->base;
1493
1494 fail:
1495 screen->base.base.context_create = NULL;
1496 return &screen->base;
1497 }
1498
1499 int
1500 nvc0_screen_tic_alloc(struct nvc0_screen *screen, void *entry)
1501 {
1502 int i = screen->tic.next;
1503
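   /* Round-robin search for a TIC slot that is not locked (still referenced
    * by queued commands); any previous occupant of the chosen slot is
    * evicted below.
    */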
1504 while (screen->tic.lock[i / 32] & (1 << (i % 32)))
1505 i = (i + 1) & (NVC0_TIC_MAX_ENTRIES - 1);
1506
1507 screen->tic.next = (i + 1) & (NVC0_TIC_MAX_ENTRIES - 1);
1508
1509 if (screen->tic.entries[i])
1510 nv50_tic_entry(screen->tic.entries[i])->id = -1;
1511
1512 screen->tic.entries[i] = entry;
1513 return i;
1514 }
1515
1516 int
1517 nvc0_screen_tsc_alloc(struct nvc0_screen *screen, void *entry)
1518 {
1519 int i = screen->tsc.next;
1520
1521 while (screen->tsc.lock[i / 32] & (1 << (i % 32)))
1522 i = (i + 1) & (NVC0_TSC_MAX_ENTRIES - 1);
1523
1524 screen->tsc.next = (i + 1) & (NVC0_TSC_MAX_ENTRIES - 1);
1525
1526 if (screen->tsc.entries[i])
1527 nv50_tsc_entry(screen->tsc.entries[i])->id = -1;
1528
1529 screen->tsc.entries[i] = entry;
1530 return i;
1531 }
1532