/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_buffer.h"
#include "nvk_cmd_buffer.h"
#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "nvk_mme.h"
#include "nvk_physical_device.h"
#include "nvk_shader.h"

#include "cl906f.h"
#include "cla0b5.h"
#include "cla1c0.h"
#include "clc0c0.h"
#include "clc5c0.h"
#include "nv_push_cl90c0.h"
#include "nv_push_cl9097.h"
#include "nv_push_cla0c0.h"
#include "nv_push_clb0c0.h"
#include "nv_push_clb1c0.h"
#include "nv_push_clc3c0.h"
#include "nv_push_clc597.h"
#include "nv_push_clc6c0.h"

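/* Emits the one-time compute channel setup: binds the compute class
 * object, selects Maxwell texture headers on Maxwell A, and, pre-Volta,
 * programs the shader heap's base address as the program region that
 * QMD program offsets are relative to.
 */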
VkResult
nvk_push_dispatch_state_init(struct nvk_queue *queue, struct nv_push *p)
{
   struct nvk_device *dev = nvk_queue_device(queue);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   P_MTHD(p, NV90C0, SET_OBJECT);
   P_NV90C0_SET_OBJECT(p, {
      .class_id = pdev->info.cls_compute,
      .engine_id = 0,
   });

   if (pdev->info.cls_compute == MAXWELL_COMPUTE_A)
      P_IMMD(p, NVB0C0, SET_SELECT_MAXWELL_TEXTURE_HEADERS, V_TRUE);

   if (pdev->info.cls_compute < VOLTA_COMPUTE_A) {
      uint64_t shader_base_addr =
         nvk_heap_contiguous_base_address(&dev->shader_heap);

      P_MTHD(p, NVA0C0, SET_PROGRAM_REGION_A);
      P_NVA0C0_SET_PROGRAM_REGION_A(p, shader_base_addr >> 32);
      P_NVA0C0_SET_PROGRAM_REGION_B(p, shader_base_addr);
   }

   return VK_SUCCESS;
}

static inline uint16_t
nvk_cmd_buffer_compute_cls(struct nvk_cmd_buffer *cmd)
{
   struct nvk_device *dev = nvk_cmd_buffer_device(cmd);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   return pdev->info.cls_compute;
}

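/* At the start of a primary command buffer, invalidate caches that may
 * hold stale data from previous work: the SKED (scheduler) cache on
 * Maxwell B+ and the sampler and texture header caches.
 */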
void
nvk_cmd_buffer_begin_compute(struct nvk_cmd_buffer *cmd,
                             const VkCommandBufferBeginInfo *pBeginInfo)
{
   if (cmd->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      struct nv_push *p = nvk_cmd_buffer_push(cmd, 6);
      if (nvk_cmd_buffer_compute_cls(cmd) >= MAXWELL_COMPUTE_B) {
         P_IMMD(p, NVB1C0, INVALIDATE_SKED_CACHES, 0);
      }
      P_IMMD(p, NVA0C0, INVALIDATE_SAMPLER_CACHE_NO_WFI, {
         .lines = LINES_ALL,
      });
      P_IMMD(p, NVA0C0, INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI, {
         .lines = LINES_ALL,
      });
   }
}

void
nvk_cmd_invalidate_compute_state(struct nvk_cmd_buffer *cmd)
{
   memset(&cmd->state.cs, 0, sizeof(cmd->state.cs));
}

void
nvk_cmd_bind_compute_shader(struct nvk_cmd_buffer *cmd,
                            struct nvk_shader *shader)
{
   cmd->state.cs.shader = shader;
}

static uint32_t
nvk_compute_local_size(struct nvk_cmd_buffer *cmd)
{
   const struct nvk_shader *shader = cmd->state.cs.shader;

   return shader->info.cs.local_size[0] *
          shader->info.cs.local_size[1] *
          shader->info.cs.local_size[2];
}

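/* Flushes any push descriptors and records the dispatch parameters
 * (base workgroup and group count) in the root descriptor table so
 * shaders can read them as system values.
 */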
static void
nvk_flush_compute_state(struct nvk_cmd_buffer *cmd,
                        uint32_t base_workgroup[3],
                        uint32_t global_size[3])
{
   struct nvk_descriptor_state *desc = &cmd->state.cs.descriptors;

   nvk_cmd_buffer_flush_push_descriptors(cmd, desc);

   nvk_descriptor_state_set_root_array(cmd, desc, cs.base_group,
                                       0, 3, base_workgroup);
   nvk_descriptor_state_set_root_array(cmd, desc, cs.group_count,
                                       0, 3, global_size);
}

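/* Uploads the root descriptor table and builds the QMD (Queue Meta
 * Data) structure that describes this dispatch to the compute
 * front-end: shader header address, shared memory size, grid size,
 * and the bound constant buffers.
 */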
static VkResult
nvk_cmd_upload_qmd(struct nvk_cmd_buffer *cmd,
                   const struct nvk_shader *shader,
                   const struct nvk_descriptor_state *desc,
                   const struct nvk_root_descriptor_table *root,
                   uint32_t global_size[3],
                   uint64_t *qmd_addr_out,
                   uint64_t *root_desc_addr_out)
{
   struct nvk_device *dev = nvk_cmd_buffer_device(cmd);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   const uint32_t min_cbuf_alignment = nvk_min_cbuf_alignment(&pdev->info);
   VkResult result;

   /* Pre-Pascal, constant buffer sizes need to be 0x100-aligned. Since we
    * simply allocate a buffer and upload data to it, make sure its size is
    * 0x100-aligned.
    */
   STATIC_ASSERT((sizeof(*root) & 0xff) == 0);
   assert(sizeof(*root) % min_cbuf_alignment == 0);

   void *root_desc_map;
   uint64_t root_desc_addr;
   result = nvk_cmd_buffer_upload_alloc(cmd, sizeof(*root), min_cbuf_alignment,
                                        &root_desc_addr, &root_desc_map);
   if (unlikely(result != VK_SUCCESS))
      return result;

   memcpy(root_desc_map, root, sizeof(*root));

   struct nak_qmd_info qmd_info = {
      .addr = shader->hdr_addr,
      .smem_size = shader->info.cs.smem_size,
      .smem_max = NVK_MAX_SHARED_SIZE,
      .global_size = {
         global_size[0],
         global_size[1],
         global_size[2],
      },
   };

   assert(shader->cbuf_map.cbuf_count <= ARRAY_SIZE(qmd_info.cbufs));
   for (uint32_t c = 0; c < shader->cbuf_map.cbuf_count; c++) {
      const struct nvk_cbuf *cbuf = &shader->cbuf_map.cbufs[c];

      struct nvk_buffer_address ba;
      if (cbuf->type == NVK_CBUF_TYPE_ROOT_DESC) {
         ba = (struct nvk_buffer_address) {
            .base_addr = root_desc_addr,
            .size = sizeof(*root),
         };
      } else {
         ASSERTED bool direct_descriptor =
            nvk_cmd_buffer_get_cbuf_addr(cmd, desc, shader, cbuf, &ba);
         assert(direct_descriptor);
      }

      if (ba.size > 0) {
         assert(ba.base_addr % min_cbuf_alignment == 0);
         ba.size = align(ba.size, min_cbuf_alignment);
         ba.size = MIN2(ba.size, NVK_MAX_CBUF_SIZE);

         qmd_info.cbufs[qmd_info.num_cbufs++] = (struct nak_qmd_cbuf) {
            .index = c,
            .addr = ba.base_addr,
            .size = ba.size,
         };
      }
   }

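   /* Fill the QMD on the CPU and upload it. SEND_PCAS_A takes the QMD
    * address in units of 256 bytes (qmd_addr >> 8), hence the 0x100
    * alignment on the upload.
    */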
   uint32_t qmd[64];
   nak_fill_qmd(&pdev->info, &shader->info, &qmd_info, qmd, sizeof(qmd));

   uint64_t qmd_addr;
   result = nvk_cmd_buffer_upload_data(cmd, qmd, sizeof(qmd), 0x100, &qmd_addr);
   if (unlikely(result != VK_SUCCESS))
      return result;

   *qmd_addr_out = qmd_addr;
   if (root_desc_addr_out != NULL)
      *root_desc_addr_out = root_desc_addr;

   return VK_SUCCESS;
}

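/* Adds an invocation count to the 64-bit compute shader invocation
 * accumulator kept in a pair of MME scratch registers; this backs the
 * VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT query.
 */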
static void
nvk_build_mme_add_cs_invocations(struct mme_builder *b,
                                 struct mme_value64 count)
{
   struct mme_value accum_hi = nvk_mme_load_scratch(b, CS_INVOCATIONS_HI);
   struct mme_value accum_lo = nvk_mme_load_scratch(b, CS_INVOCATIONS_LO);
   struct mme_value64 accum = mme_value64(accum_lo, accum_hi);

   mme_add64_to(b, accum, accum, count);

   STATIC_ASSERT(NVK_MME_SCRATCH_CS_INVOCATIONS_HI + 1 ==
                 NVK_MME_SCRATCH_CS_INVOCATIONS_LO);

   mme_mthd(b, NVK_SET_MME_SCRATCH(CS_INVOCATIONS_HI));
   mme_emit(b, accum.hi);
   mme_emit(b, accum.lo);

   mme_free_reg64(b, accum);
}

void
nvk_mme_add_cs_invocations(struct mme_builder *b)
{
   struct mme_value64 count = mme_load_addr64(b);

   nvk_build_mme_add_cs_invocations(b, count);
}

VKAPI_ATTR void VKAPI_CALL
nvk_CmdDispatchBase(VkCommandBuffer commandBuffer,
                    uint32_t baseGroupX,
                    uint32_t baseGroupY,
                    uint32_t baseGroupZ,
                    uint32_t groupCountX,
                    uint32_t groupCountY,
                    uint32_t groupCountZ)
{
   VK_FROM_HANDLE(nvk_cmd_buffer, cmd, commandBuffer);
   struct nvk_descriptor_state *desc = &cmd->state.cs.descriptors;

   uint32_t base_workgroup[3] = { baseGroupX, baseGroupY, baseGroupZ };
   uint32_t global_size[3] = { groupCountX, groupCountY, groupCountZ };
   nvk_flush_compute_state(cmd, base_workgroup, global_size);

   uint64_t qmd_addr;
   VkResult result = nvk_cmd_upload_qmd(cmd, cmd->state.cs.shader,
                                        desc, (void *)desc->root, global_size,
                                        &qmd_addr, NULL);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd->vk, result);
      return;
   }

   const uint32_t local_size = nvk_compute_local_size(cmd);
   const uint64_t cs_invocations =
      (uint64_t)local_size * (uint64_t)groupCountX *
      (uint64_t)groupCountY * (uint64_t)groupCountZ;

   struct nv_push *p = nvk_cmd_buffer_push(cmd, 7);

   P_1INC(p, NV9097, CALL_MME_MACRO(NVK_MME_ADD_CS_INVOCATIONS));
   P_INLINE_DATA(p, cs_invocations >> 32);
   P_INLINE_DATA(p, cs_invocations);

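   /* SEND_PCAS_A hands the QMD to the compute front-end (the address is
    * in 256-byte units); the signaling method that follows schedules it.
    * Turing and earlier use SEND_SIGNALING_PCAS_B, Ampere+ uses
    * SEND_SIGNALING_PCAS2_B.
    */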
   P_MTHD(p, NVA0C0, SEND_PCAS_A);
   P_NVA0C0_SEND_PCAS_A(p, qmd_addr >> 8);

   if (nvk_cmd_buffer_compute_cls(cmd) <= TURING_COMPUTE_A) {
      P_IMMD(p, NVA0C0, SEND_SIGNALING_PCAS_B, {
         .invalidate = INVALIDATE_TRUE,
         .schedule = SCHEDULE_TRUE
      });
   } else {
      P_IMMD(p, NVC6C0, SEND_SIGNALING_PCAS2_B,
             PCAS_ACTION_INVALIDATE_COPY_SCHEDULE);
   }
}

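/* Dispatches an internal (meta) shader. Rather than using the bound
 * descriptor state, this builds a temporary root descriptor table with
 * the push data inlined, so it can be used without disturbing the
 * application's compute state.
 */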
void
nvk_cmd_dispatch_shader(struct nvk_cmd_buffer *cmd,
                        struct nvk_shader *shader,
                        const void *push_data, size_t push_size,
                        uint32_t groupCountX,
                        uint32_t groupCountY,
                        uint32_t groupCountZ)
{
   struct nvk_root_descriptor_table root = {
      .cs.group_count = {
         groupCountX,
         groupCountY,
         groupCountZ,
      },
   };
   assert(push_size <= sizeof(root.push));
   memcpy(root.push, push_data, push_size);

   uint64_t qmd_addr;
   VkResult result = nvk_cmd_upload_qmd(cmd, shader, NULL, &root,
                                        root.cs.group_count,
                                        &qmd_addr, NULL);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd->vk, result);
      return;
   }

   struct nv_push *p = nvk_cmd_buffer_push(cmd, 8);

   /* Internal shaders don't want conditional rendering */
   P_IMMD(p, NVA0C0, SET_RENDER_ENABLE_OVERRIDE, MODE_ALWAYS_RENDER);

   P_MTHD(p, NVA0C0, SEND_PCAS_A);
   P_NVA0C0_SEND_PCAS_A(p, qmd_addr >> 8);

   if (nvk_cmd_buffer_compute_cls(cmd) <= TURING_COMPUTE_A) {
      P_IMMD(p, NVA0C0, SEND_SIGNALING_PCAS_B, {
         .invalidate = INVALIDATE_TRUE,
         .schedule = SCHEDULE_TRUE
      });
   } else {
      P_IMMD(p, NVC6C0, SEND_SIGNALING_PCAS2_B,
             PCAS_ACTION_INVALIDATE_COPY_SCHEDULE);
   }

   P_IMMD(p, NVA0C0, SET_RENDER_ENABLE_OVERRIDE, MODE_USE_RENDER_ENABLE);
}

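/* Stores a 32-bit value to an arbitrary GPU address from an MME macro
 * by emitting a semaphore release. The trailing immediate requests a
 * payload-only (one-word) report, so only the value itself is written.
 */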
static void
mme_store_global(struct mme_builder *b,
                 struct mme_value64 addr,
                 struct mme_value v)
{
   mme_mthd(b, NV9097_SET_REPORT_SEMAPHORE_A);
   mme_emit_addr64(b, addr);
   mme_emit(b, v);
   mme_emit(b, mme_imm(0x10000000));
}

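/* Stores x, y, and z to three consecutive dwords at addr + offset.
 * Consumes (frees) the address register pair, as the _free_addr suffix
 * indicates.
 */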
static void
mme_store_global_vec3_free_addr(struct mme_builder *b,
                                struct mme_value64 addr,
                                uint32_t offset,
                                struct mme_value x,
                                struct mme_value y,
                                struct mme_value z)
{
   if (offset > 0)
      mme_add64_to(b, addr, addr, mme_imm64(offset));

   mme_store_global(b, addr, x);
   mme_add64_to(b, addr, addr, mme_imm64(4));
   mme_store_global(b, addr, y);
   mme_add64_to(b, addr, addr, mme_imm64(4));
   mme_store_global(b, addr, z);
   mme_free_reg64(b, addr);
}

static void
mme_store_root_desc_group_count(struct mme_builder *b,
                                struct mme_value64 root_desc_addr,
                                struct mme_value group_count_x,
                                struct mme_value group_count_y,
                                struct mme_value group_count_z)
{
   uint32_t root_desc_size_offset =
      offsetof(struct nvk_root_descriptor_table, cs.group_count);
   mme_store_global_vec3_free_addr(b, root_desc_addr,
                                   root_desc_size_offset,
                                   group_count_x,
                                   group_count_y,
                                   group_count_z);
}

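/* Patches the dispatch size directly into the already-uploaded QMD.
 * The field layout varies by generation: when X, Y, and Z are three
 * consecutive 32-bit fields we can store them as a vec3; otherwise Y
 * and Z are packed as 16-bit fields in a single dword.
 */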
static void
mme_store_qmd_dispatch_size(struct mme_builder *b,
                            struct mme_value64 qmd_addr,
                            struct mme_value group_count_x,
                            struct mme_value group_count_y,
                            struct mme_value group_count_z)
{
   struct nak_qmd_dispatch_size_layout qmd_size_layout =
      nak_get_qmd_dispatch_size_layout(b->devinfo);
   assert(qmd_size_layout.y_start == qmd_size_layout.x_start + 32);

   if (qmd_size_layout.z_start == qmd_size_layout.y_start + 32) {
      mme_store_global_vec3_free_addr(b, qmd_addr,
                                      qmd_size_layout.x_start / 8,
                                      group_count_x,
                                      group_count_y,
                                      group_count_z);
   } else {
      mme_add64_to(b, qmd_addr, qmd_addr,
                   mme_imm64(qmd_size_layout.x_start / 8));
      mme_store_global(b, qmd_addr, group_count_x);

      assert(qmd_size_layout.z_start == qmd_size_layout.y_start + 16);
      struct mme_value group_count_yz =
         mme_merge(b, group_count_y, group_count_z, 16, 16, 0);
      mme_add64_to(b, qmd_addr, qmd_addr, mme_imm64(4));
      mme_store_global(b, qmd_addr, group_count_yz);
      mme_free_reg(b, group_count_yz);

      mme_free_reg64(b, qmd_addr);
   }
}

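/* MME macro implementing vkCmdDispatchIndirect. Reads the three
 * VkDispatchIndirectCommand dwords (via the data FIFO on Turing+, or
 * spliced into the pushbuf on older hardware), patches them into the
 * root descriptor table and the QMD, and accumulates the invocation
 * count for pipeline statistics.
 */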
void
nvk_mme_dispatch_indirect(struct mme_builder *b)
{
   if (b->devinfo->cls_eng3d >= TURING_A) {
      /* Load everything before we switch to an indirect read */
      struct mme_value64 dispatch_addr = mme_load_addr64(b);
      struct mme_value64 root_desc_addr = mme_load_addr64(b);
      struct mme_value64 qmd_addr = mme_load_addr64(b);
      struct mme_value local_size = mme_load(b);

      mme_tu104_read_fifoed(b, dispatch_addr, mme_imm(3));
      mme_free_reg64(b, dispatch_addr);
      struct mme_value group_count_x = mme_load(b);
      struct mme_value group_count_y = mme_load(b);
      struct mme_value group_count_z = mme_load(b);

      mme_store_root_desc_group_count(b, root_desc_addr,
                                      group_count_x,
                                      group_count_y,
                                      group_count_z);

      mme_store_qmd_dispatch_size(b, qmd_addr,
                                  group_count_x,
                                  group_count_y,
                                  group_count_z);

      struct mme_value64 cs1 = mme_umul_32x32_64(b, group_count_y,
                                                 group_count_z);
      struct mme_value64 cs2 = mme_umul_32x32_64(b, group_count_x,
                                                 local_size);
      struct mme_value64 count = mme_mul64(b, cs1, cs2);
      mme_free_reg64(b, cs1);
      mme_free_reg64(b, cs2);

      nvk_build_mme_add_cs_invocations(b, count);
   } else {
      struct mme_value group_count_x = mme_load(b);
      struct mme_value group_count_y = mme_load(b);
      struct mme_value group_count_z = mme_load(b);

      struct mme_value64 root_desc_addr = mme_load_addr64(b);
      mme_store_root_desc_group_count(b, root_desc_addr,
                                      group_count_x,
                                      group_count_y,
                                      group_count_z);

      struct mme_value64 qmd_addr = mme_load_addr64(b);
      mme_store_qmd_dispatch_size(b, qmd_addr,
                                  group_count_x,
                                  group_count_y,
                                  group_count_z);

      /* Y and Z are 16b, so this can't overflow */
      struct mme_value cs1 =
         mme_mul_32x32_32_free_srcs(b, group_count_y, group_count_z);
      struct mme_value64 cs2 =
         mme_umul_32x32_64_free_srcs(b, group_count_x, cs1);
      struct mme_value local_size = mme_load(b);
      struct mme_value64 count =
         mme_umul_32x64_64_free_srcs(b, local_size, cs2);

      nvk_build_mme_add_cs_invocations(b, count);
   }
}

VKAPI_ATTR void VKAPI_CALL
nvk_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                        VkBuffer _buffer,
                        VkDeviceSize offset)
{
   VK_FROM_HANDLE(nvk_cmd_buffer, cmd, commandBuffer);
   VK_FROM_HANDLE(nvk_buffer, buffer, _buffer);
   struct nvk_descriptor_state *desc = &cmd->state.cs.descriptors;

   uint64_t dispatch_addr = nvk_buffer_address(buffer, offset);

   /* We set these through the MME */
   uint32_t base_workgroup[3] = { 0, 0, 0 };
   uint32_t global_size[3] = { 0, 0, 0 };
   nvk_flush_compute_state(cmd, base_workgroup, global_size);

   uint64_t qmd_addr, root_desc_addr;
   VkResult result = nvk_cmd_upload_qmd(cmd, cmd->state.cs.shader,
                                        desc, (void *)desc->root, global_size,
                                        &qmd_addr, &root_desc_addr);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd->vk, result);
      return;
   }

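   /* On Turing+, the MME reads the indirect parameters itself through the
    * MME data FIFO. On older hardware, we stall the command streamer with
    * SET_REFERENCE and splice the three dwords from the indirect buffer
    * directly into the pushbuf as macro parameters.
    */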
   struct nv_push *p;
   if (nvk_cmd_buffer_compute_cls(cmd) >= TURING_A) {
      p = nvk_cmd_buffer_push(cmd, 14);
      P_IMMD(p, NVC597, SET_MME_DATA_FIFO_CONFIG, FIFO_SIZE_SIZE_4KB);
      P_1INC(p, NV9097, CALL_MME_MACRO(NVK_MME_DISPATCH_INDIRECT));
      P_INLINE_DATA(p, dispatch_addr >> 32);
      P_INLINE_DATA(p, dispatch_addr);
      P_INLINE_DATA(p, root_desc_addr >> 32);
      P_INLINE_DATA(p, root_desc_addr);
      P_INLINE_DATA(p, qmd_addr >> 32);
      P_INLINE_DATA(p, qmd_addr);
      P_INLINE_DATA(p, nvk_compute_local_size(cmd));
   } else {
      p = nvk_cmd_buffer_push(cmd, 5);
      /* Stall the command streamer */
      __push_immd(p, SUBC_NV9097, NV906F_SET_REFERENCE, 0);

      P_1INC(p, NV9097, CALL_MME_MACRO(NVK_MME_DISPATCH_INDIRECT));
      nv_push_update_count(p, sizeof(VkDispatchIndirectCommand) / 4);
      nvk_cmd_buffer_push_indirect(cmd, dispatch_addr,
                                   sizeof(VkDispatchIndirectCommand));
      p = nvk_cmd_buffer_push(cmd, 9);
      P_INLINE_DATA(p, root_desc_addr >> 32);
      P_INLINE_DATA(p, root_desc_addr);
      P_INLINE_DATA(p, qmd_addr >> 32);
      P_INLINE_DATA(p, qmd_addr);
      P_INLINE_DATA(p, nvk_compute_local_size(cmd));
   }

   P_MTHD(p, NVA0C0, SEND_PCAS_A);
   P_NVA0C0_SEND_PCAS_A(p, qmd_addr >> 8);
   if (nvk_cmd_buffer_compute_cls(cmd) <= TURING_COMPUTE_A) {
      P_IMMD(p, NVA0C0, SEND_SIGNALING_PCAS_B, {
         .invalidate = INVALIDATE_TRUE,
         .schedule = SCHEDULE_TRUE
      });
   } else {
      P_IMMD(p, NVC6C0, SEND_SIGNALING_PCAS2_B,
             PCAS_ACTION_INVALIDATE_COPY_SCHEDULE);
   }
}