/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "vtn_private.h"
#include "GLSL.ext.AMD.h"

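/* SPV_AMD_gcn_shader: cube face index/coordinate helpers and the TimeAMD
 * shader clock.  For these OpExtInst instructions the result id is w[2] and
 * the operands start at w[5].
 */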
bool
vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                      const uint32_t *w, unsigned count)
{
   nir_def *def;
   switch ((enum GcnShaderAMD)ext_opcode) {
   case CubeFaceIndexAMD:
      def = nir_channel(&b->nb, nir_cube_amd(&b->nb, vtn_get_nir_ssa(b, w[5])), 3);
      break;
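   /* cube_amd packs the per-face coordinates and the face index into one
    * vec4.  Swizzle out the s/t channels, scale by the reciprocal of the
    * major-axis channel and bias by 0.5 to get normalized face coordinates.
    */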
   case CubeFaceCoordAMD: {
      def = nir_cube_amd(&b->nb, vtn_get_nir_ssa(b, w[5]));
      nir_def *st = nir_swizzle(&b->nb, def, (unsigned[]){1, 0}, 2);
      nir_def *invma = nir_frcp(&b->nb, nir_channel(&b->nb, def, 2));
      def = nir_ffma_imm2(&b->nb, st, invma, 0.5);
      break;
   }
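   /* TimeAMD wants a single 64-bit value; nir_shader_clock returns the
    * counter as two 32-bit halves, so pack them.
    */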
   case TimeAMD: {
      def = nir_pack_64_2x32(&b->nb, nir_shader_clock(&b->nb, SCOPE_SUBGROUP));
      break;
   }
   default:
      unreachable("Invalid opcode");
   }

   vtn_push_nir_ssa(b, w[2], def);

   return true;
}

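/* SPV_AMD_shader_ballot: subgroup swizzles, write-invocation and mbcnt,
 * lowered to the corresponding AMD-specific NIR intrinsics.
 */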
bool
vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                         const uint32_t *w, unsigned count)
{
   unsigned num_args;
   nir_intrinsic_op op;
   switch ((enum ShaderBallotAMD)ext_opcode) {
   case SwizzleInvocationsAMD:
      num_args = 1;
      op = nir_intrinsic_quad_swizzle_amd;
      break;
   case SwizzleInvocationsMaskedAMD:
      num_args = 1;
      op = nir_intrinsic_masked_swizzle_amd;
      break;
   case WriteInvocationAMD:
      num_args = 3;
      op = nir_intrinsic_write_invocation_amd;
      break;
   case MbcntAMD:
      num_args = 1;
      op = nir_intrinsic_mbcnt_amd;
      break;
   default:
      unreachable("Invalid opcode");
   }

   const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
   nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type);
   if (nir_intrinsic_infos[op].src_components[0] == 0)
      intrin->num_components = intrin->def.num_components;

   for (unsigned i = 0; i < num_args; i++)
      intrin->src[i] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[i + 5]));

   if (intrin->intrinsic == nir_intrinsic_quad_swizzle_amd) {
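      /* The offsets operand is a constant uvec4 with each component in
       * [0, 3]; pack it into the 8-bit quad swizzle mask, two bits per lane.
       */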
      struct vtn_value *val = vtn_value(b, w[6], vtn_value_type_constant);
      unsigned mask = val->constant->values[0].u32 |
                      val->constant->values[1].u32 << 2 |
                      val->constant->values[2].u32 << 4 |
                      val->constant->values[3].u32 << 6;
      nir_intrinsic_set_swizzle_mask(intrin, mask);

   } else if (intrin->intrinsic == nir_intrinsic_masked_swizzle_amd) {
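      /* The mask operand is a constant uvec3 of 5-bit values (the and/or/xor
       * lane masks); pack them five bits apart into one swizzle mask.
       */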
      struct vtn_value *val = vtn_value(b, w[6], vtn_value_type_constant);
      unsigned mask = val->constant->values[0].u32 |
                      val->constant->values[1].u32 << 5 |
                      val->constant->values[2].u32 << 10;
      nir_intrinsic_set_swizzle_mask(intrin, mask);
   } else if (intrin->intrinsic == nir_intrinsic_mbcnt_amd) {
      /* The v_mbcnt instruction has an additional source that is added to the result.
       * This is exposed by the NIR intrinsic but not by SPIR-V, so we add zero here.
       */
      intrin->src[1] = nir_src_for_ssa(nir_imm_int(&b->nb, 0));
   }

   nir_builder_instr_insert(&b->nb, &intrin->instr);
   vtn_push_nir_ssa(b, w[2], &intrin->def);

   return true;
}

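/* SPV_AMD_shader_trinary_minmax: min3/max3/mid3 of three operands, built
 * from nested two-source min/max ALU ops.
 */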
bool
vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                                 const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;

   unsigned num_inputs = count - 5;
   assert(num_inputs == 3);
   nir_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      src[i] = vtn_get_nir_ssa(b, w[i + 5]);

   /* place constants at src[1-2] for easier constant-folding */
   for (unsigned i = 1; i <= 2; i++) {
      if (nir_src_as_const_value(nir_src_for_ssa(src[0]))) {
         nir_def *tmp = src[i];
         src[i] = src[0];
         src[0] = tmp;
      }
   }
   nir_def *def;
   switch ((enum ShaderTrinaryMinMaxAMD)ext_opcode) {
   case FMin3AMD:
      def = nir_fmin(nb, src[0], nir_fmin(nb, src[1], src[2]));
      break;
   case UMin3AMD:
      def = nir_umin(nb, src[0], nir_umin(nb, src[1], src[2]));
      break;
   case SMin3AMD:
      def = nir_imin(nb, src[0], nir_imin(nb, src[1], src[2]));
      break;
   case FMax3AMD:
      def = nir_fmax(nb, src[0], nir_fmax(nb, src[1], src[2]));
      break;
   case UMax3AMD:
      def = nir_umax(nb, src[0], nir_umax(nb, src[1], src[2]));
      break;
   case SMax3AMD:
      def = nir_imax(nb, src[0], nir_imax(nb, src[1], src[2]));
      break;
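   /* mid3(a, b, c) = min(max(a, min(b, c)), max(b, c)), i.e. the median. */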
   case FMid3AMD:
      def = nir_fmin(nb, nir_fmax(nb, src[0], nir_fmin(nb, src[1], src[2])),
                     nir_fmax(nb, src[1], src[2]));
      break;
   case UMid3AMD:
      def = nir_umin(nb, nir_umax(nb, src[0], nir_umin(nb, src[1], src[2])),
                     nir_umax(nb, src[1], src[2]));
      break;
   case SMid3AMD:
      def = nir_imin(nb, nir_imax(nb, src[0], nir_imin(nb, src[1], src[2])),
                     nir_imax(nb, src[1], src[2]));
      break;
   default:
      unreachable("unknown opcode\n");
      break;
   }

   vtn_push_nir_ssa(b, w[2], def);

   return true;
}

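/* SPV_AMD_shader_explicit_vertex_parameter: InterpolateAtVertexAMD becomes
 * interp_deref_at_vertex, with the interpolant deref from w[5] and the
 * vertex index from w[6].
 */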
bool
vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                                            const uint32_t *w, unsigned count)
{
   nir_intrinsic_op op;
   switch ((enum ShaderExplicitVertexParameterAMD)ext_opcode) {
   case InterpolateAtVertexAMD:
      op = nir_intrinsic_interp_deref_at_vertex;
      break;
   default:
      unreachable("unknown opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);

   struct vtn_pointer *ptr =
      vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
   nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);

   /* If the value we are interpolating has an index into a vector then
    * interpolate the vector and index the result of that instead. This is
    * necessary because the index will get generated as a series of nir_bcsel
    * instructions so it would no longer be an input variable.
    */
   const bool vec_array_deref = deref->deref_type == nir_deref_type_array &&
                                glsl_type_is_vector(nir_deref_instr_parent(deref)->type);

   nir_deref_instr *vec_deref = NULL;
   if (vec_array_deref) {
      vec_deref = deref;
      deref = nir_deref_instr_parent(deref);
   }
   intrin->src[0] = nir_src_for_ssa(&deref->def);
   intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));

   intrin->num_components = glsl_get_vector_elements(deref->type);
   nir_def_init(&intrin->instr, &intrin->def,
                glsl_get_vector_elements(deref->type),
                glsl_get_bit_size(deref->type));

   nir_builder_instr_insert(&b->nb, &intrin->instr);

   nir_def *def;
   if (vec_array_deref) {
      assert(vec_deref);
      def = nir_vector_extract(&b->nb, &intrin->def,
                               vec_deref->arr.index.ssa);
   } else {
      def = &intrin->def;
   }
   vtn_push_nir_ssa(b, w[2], def);

   return true;
}