/*
 * Copyright © 2019 Valve Corporation
 * Copyright © 2018 Red Hat
 *
 * SPDX-License-Identifier: MIT
 */

#include "radv_formats.h"
#include "radv_meta.h"
#include "vk_format.h"

12 static nir_shader *
build_fmask_expand_compute_shader(struct radv_device * device,int samples)13 build_fmask_expand_compute_shader(struct radv_device *device, int samples)
14 {
15 const struct glsl_type *type = glsl_sampler_type(GLSL_SAMPLER_DIM_MS, false, true, GLSL_TYPE_FLOAT);
16 const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_MS, true, GLSL_TYPE_FLOAT);
17
18 nir_builder b = radv_meta_init_shader(device, MESA_SHADER_COMPUTE, "meta_fmask_expand_cs-%d", samples);
19 b.shader->info.workgroup_size[0] = 8;
20 b.shader->info.workgroup_size[1] = 8;
21
22 nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, type, "s_tex");
23 input_img->data.descriptor_set = 0;
24 input_img->data.binding = 0;
25
26 nir_variable *output_img = nir_variable_create(b.shader, nir_var_image, img_type, "out_img");
27 output_img->data.descriptor_set = 0;
28 output_img->data.binding = 1;
29 output_img->data.access = ACCESS_NON_READABLE;
30
31 nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
32 nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->def;
33
34 nir_def *tex_coord = get_global_ids(&b, 3);
35
36 nir_def *tex_vals[8];
37 for (uint32_t i = 0; i < samples; i++) {
38 tex_vals[i] = nir_txf_ms_deref(&b, input_img_deref, tex_coord, nir_imm_int(&b, i));
39 }
40
41 nir_def *img_coord = nir_vec4(&b, nir_channel(&b, tex_coord, 0), nir_channel(&b, tex_coord, 1),
42 nir_channel(&b, tex_coord, 2), nir_undef(&b, 1, 32));
43
44 for (uint32_t i = 0; i < samples; i++) {
45 nir_image_deref_store(&b, output_img_deref, img_coord, nir_imm_int(&b, i), tex_vals[i], nir_imm_int(&b, 0),
46 .image_dim = GLSL_SAMPLER_DIM_MS, .image_array = true);
47 }
48
49 return b.shader;
50 }
51
52 static VkResult
create_pipeline(struct radv_device * device,int samples,VkPipeline * pipeline)53 create_pipeline(struct radv_device *device, int samples, VkPipeline *pipeline)
54 {
55 struct radv_meta_state *state = &device->meta_state;
56 VkResult result;
57
58 if (!state->fmask_expand.ds_layout) {
59 const VkDescriptorSetLayoutBinding bindings[] = {
60 {
61 .binding = 0,
62 .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
63 .descriptorCount = 1,
64 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
65 },
66 {
67 .binding = 1,
68 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
69 .descriptorCount = 1,
70 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
71 },
72 };
73
74 result = radv_meta_create_descriptor_set_layout(device, 2, bindings, &state->fmask_expand.ds_layout);
75 if (result != VK_SUCCESS)
76 return result;
77 }
78
79 if (!state->fmask_expand.p_layout) {
80 result = radv_meta_create_pipeline_layout(device, &state->fmask_expand.ds_layout, 0, NULL,
81 &state->fmask_expand.p_layout);
82 if (result != VK_SUCCESS)
83 return result;
84 }
85
86 nir_shader *cs = build_fmask_expand_compute_shader(device, samples);
87
88 result = radv_meta_create_compute_pipeline(device, cs, state->fmask_expand.p_layout, pipeline);
89
90 ralloc_free(cs);
91 return result;
92 }
93
94 static VkResult
get_pipeline(struct radv_device * device,uint32_t samples_log2,VkPipeline * pipeline_out)95 get_pipeline(struct radv_device *device, uint32_t samples_log2, VkPipeline *pipeline_out)
96 {
97 struct radv_meta_state *state = &device->meta_state;
98 VkResult result = VK_SUCCESS;
99
100 mtx_lock(&state->mtx);
101 if (!state->fmask_expand.pipeline[samples_log2]) {
102 result = create_pipeline(device, 1 << samples_log2, &state->fmask_expand.pipeline[samples_log2]);
103 if (result != VK_SUCCESS)
104 goto fail;
105 }
106
107 *pipeline_out = state->fmask_expand.pipeline[samples_log2];
108
109 fail:
110 mtx_unlock(&state->mtx);
111 return result;
112 }
113
114 void
radv_expand_fmask_image_inplace(struct radv_cmd_buffer * cmd_buffer,struct radv_image * image,const VkImageSubresourceRange * subresourceRange)115 radv_expand_fmask_image_inplace(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
116 const VkImageSubresourceRange *subresourceRange)
117 {
118 struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
119 struct radv_meta_saved_state saved_state;
120 const uint32_t samples = image->vk.samples;
121 const uint32_t samples_log2 = ffs(samples) - 1;
122 unsigned layer_count = vk_image_subresource_layer_count(&image->vk, subresourceRange);
123 struct radv_image_view iview;
124 VkPipeline pipeline;
125 VkResult result;
126
127 result = get_pipeline(device, samples_log2, &pipeline);
128 if (result != VK_SUCCESS) {
129 vk_command_buffer_set_error(&cmd_buffer->vk, result);
130 return;
131 }
132
133 radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS);
134
135 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
136
137 cmd_buffer->state.flush_bits |=
138 radv_dst_access_flush(cmd_buffer, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT, VK_ACCESS_2_SHADER_READ_BIT, image);
139
140 radv_image_view_init(&iview, device,
141 &(VkImageViewCreateInfo){
142 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
143 .image = radv_image_to_handle(image),
144 .viewType = radv_meta_get_view_type(image),
145 .format = vk_format_no_srgb(image->vk.format),
146 .subresourceRange =
147 {
148 .aspectMask = subresourceRange->aspectMask,
149 .baseMipLevel = 0,
150 .levelCount = 1,
151 .baseArrayLayer = subresourceRange->baseArrayLayer,
152 .layerCount = layer_count,
153 },
154 },
155 0, NULL);
156
157 radv_meta_push_descriptor_set(cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.fmask_expand.p_layout,
158 0, 2,
159 (VkWriteDescriptorSet[]){{.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
160 .dstBinding = 0,
161 .dstArrayElement = 0,
162 .descriptorCount = 1,
163 .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
164 .pImageInfo =
165 (VkDescriptorImageInfo[]){
166 {.sampler = VK_NULL_HANDLE,
167 .imageView = radv_image_view_to_handle(&iview),
168 .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
169 }},
170 {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
171 .dstBinding = 1,
172 .dstArrayElement = 0,
173 .descriptorCount = 1,
174 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
175 .pImageInfo = (VkDescriptorImageInfo[]){
176 {.sampler = VK_NULL_HANDLE,
177 .imageView = radv_image_view_to_handle(&iview),
178 .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
179 }}});
180
181 radv_unaligned_dispatch(cmd_buffer, image->vk.extent.width, image->vk.extent.height, layer_count);
182
183 radv_image_view_finish(&iview);
184
185 radv_meta_restore(&saved_state, cmd_buffer);
186
187 cmd_buffer->state.flush_bits |=
188 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
189 radv_src_access_flush(cmd_buffer, VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT, VK_ACCESS_2_SHADER_WRITE_BIT, image);
190
191 /* Re-initialize FMASK in fully expanded mode. */
192 cmd_buffer->state.flush_bits |= radv_init_fmask(cmd_buffer, image, subresourceRange);
193 }
194
195 void
radv_device_finish_meta_fmask_expand_state(struct radv_device * device)196 radv_device_finish_meta_fmask_expand_state(struct radv_device *device)
197 {
198 struct radv_meta_state *state = &device->meta_state;
199
200 for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
201 radv_DestroyPipeline(radv_device_to_handle(device), state->fmask_expand.pipeline[i], &state->alloc);
202 }
203 radv_DestroyPipelineLayout(radv_device_to_handle(device), state->fmask_expand.p_layout, &state->alloc);
204
205 device->vk.dispatch_table.DestroyDescriptorSetLayout(radv_device_to_handle(device), state->fmask_expand.ds_layout,
206 &state->alloc);
207 }
208
209 VkResult
radv_device_init_meta_fmask_expand_state(struct radv_device * device,bool on_demand)210 radv_device_init_meta_fmask_expand_state(struct radv_device *device, bool on_demand)
211 {
212 struct radv_meta_state *state = &device->meta_state;
213 VkResult result;
214
215 if (on_demand)
216 return VK_SUCCESS;
217
218 for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; i++) {
219 result = create_pipeline(device, 1 << i, &state->fmask_expand.pipeline[i]);
220 if (result != VK_SUCCESS)
221 return result;
222 }
223
224 return VK_SUCCESS;
225 }
226