/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "nir_test.h"
#include "nir_range_analysis.h"
25
26 class ssa_def_bits_used_test : public nir_test {
27 protected:
ssa_def_bits_used_test()28 ssa_def_bits_used_test()
29 : nir_test::nir_test("nir_def_bits_used_test")
30 {
31 }
32
33 nir_alu_instr *build_alu_instr(nir_op op, nir_def *, nir_def *);
34 };
35
36 class unsigned_upper_bound_test : public nir_test {
37 protected:
unsigned_upper_bound_test()38 unsigned_upper_bound_test()
39 : nir_test::nir_test("nir_unsigned_upper_bound_test")
40 {
41 }
42 };
43
44 static bool
is_used_once(const nir_def * def)45 is_used_once(const nir_def *def)
46 {
47 return list_is_singular(&def->uses);
48 }
49
50 nir_alu_instr *
build_alu_instr(nir_op op,nir_def * src0,nir_def * src1)51 ssa_def_bits_used_test::build_alu_instr(nir_op op,
52 nir_def *src0, nir_def *src1)
53 {
54 nir_def *def = nir_build_alu(b, op, src0, src1, NULL, NULL);
55
56 if (def == NULL)
57 return NULL;
58
59 nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
60
61 if (alu == NULL)
62 return NULL;
63
64 alu->def.num_components = 1;
65
66 return alu;
67 }
68
TEST_F(ssa_def_bits_used_test, iand_with_const_vector)
{
   static const unsigned src0_imm[4] = { 255u << 24, 255u << 16, 255u << 8, 255u };

   nir_def *src0 = nir_imm_ivec4(b,
                                 src0_imm[0], src0_imm[1],
                                 src0_imm[2], src0_imm[3]);
   nir_def *src1 = nir_imm_int(b, 0xffffffff);

   nir_alu_instr *alu = build_alu_instr(nir_op_iand, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 0; chan < 4; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[0].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[1].src.ssa);

      /* For iand, only the bits set in the constant operand can matter,
       * so the answer is the value swizzled from src0.
       */
      EXPECT_EQ(src0_imm[chan], bits_used);
   }
}
97
TEST_F(ssa_def_bits_used_test, ior_with_const_vector)
{
   static const unsigned src0_imm[4] = { 255u << 24, 255u << 16, 255u << 8, 255u };

   nir_def *src0 = nir_imm_ivec4(b,
                                 src0_imm[0], src0_imm[1],
                                 src0_imm[2], src0_imm[3]);
   nir_def *src1 = nir_imm_int(b, 0xffffffff);

   nir_alu_instr *alu = build_alu_instr(nir_op_ior, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 0; chan < 4; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[0].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[1].src.ssa);

      /* For ior, only the bits *cleared* in the constant operand can
       * matter, so the answer is the value swizzled from ~src0.
       */
      EXPECT_EQ(~src0_imm[chan], bits_used);
   }
}
126
TEST_F(ssa_def_bits_used_test, extract_i16_with_const_index)
{
   nir_def *src0 = nir_imm_int(b, 0xffffffff);

   /* Only components 1 and 2 hold valid 16-bit word indices; the 9s at
    * either end are sentinels that would produce a wrong answer if the
    * analysis read the wrong channel.
    */
   static const unsigned src1_imm[4] = { 9, 1, 0, 9 };

   nir_def *src1 = nir_imm_ivec4(b,
                                 src1_imm[0], src1_imm[1],
                                 src1_imm[2], src1_imm[3]);

   nir_alu_instr *alu = build_alu_instr(nir_op_extract_i16, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 1; chan < 3; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[1].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);

      /* Exactly the selected 16-bit word of src0 is used. */
      EXPECT_EQ(0xffffu << (16 * src1_imm[chan]), bits_used);
   }
}
157
TEST_F(ssa_def_bits_used_test, extract_u16_with_const_index)
{
   nir_def *src0 = nir_imm_int(b, 0xffffffff);

   /* Only components 1 and 2 hold valid 16-bit word indices; the 9s at
    * either end are sentinels that would produce a wrong answer if the
    * analysis read the wrong channel.
    */
   static const unsigned src1_imm[4] = { 9, 1, 0, 9 };

   nir_def *src1 = nir_imm_ivec4(b,
                                 src1_imm[0], src1_imm[1],
                                 src1_imm[2], src1_imm[3]);

   nir_alu_instr *alu = build_alu_instr(nir_op_extract_u16, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 1; chan < 3; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[1].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);

      /* Exactly the selected 16-bit word of src0 is used. */
      EXPECT_EQ(0xffffu << (16 * src1_imm[chan]), bits_used);
   }
}
188
TEST_F(ssa_def_bits_used_test, extract_i8_with_const_index)
{
   nir_def *src0 = nir_imm_int(b, 0xffffffff);

   /* All four components hold valid byte indices (0..3). */
   static const unsigned src1_imm[4] = { 3, 2, 1, 0 };

   nir_def *src1 = nir_imm_ivec4(b,
                                 src1_imm[0], src1_imm[1],
                                 src1_imm[2], src1_imm[3]);

   nir_alu_instr *alu = build_alu_instr(nir_op_extract_i8, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 0; chan < 4; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[1].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);

      /* Exactly the selected byte of src0 is used. */
      EXPECT_EQ(0xffu << (8 * src1_imm[chan]), bits_used);
   }
}
219
TEST_F(ssa_def_bits_used_test, extract_u8_with_const_index)
{
   nir_def *src0 = nir_imm_int(b, 0xffffffff);

   /* All four components hold valid byte indices (0..3). */
   static const unsigned src1_imm[4] = { 3, 2, 1, 0 };

   nir_def *src1 = nir_imm_ivec4(b,
                                 src1_imm[0], src1_imm[1],
                                 src1_imm[2], src1_imm[3]);

   nir_alu_instr *alu = build_alu_instr(nir_op_extract_u8, src0, src1);

   ASSERT_NE((void *) 0, alu);

   for (unsigned chan = 0; chan < 4; chan++) {
      /* nir_def_bits_used accumulates over *all* uses (as it should), so
       * src1 must have exactly one use for the per-channel expectation
       * below to be meaningful.
       */
      ASSERT_TRUE(is_used_once(src1));

      alu->src[1].swizzle[0] = chan;

      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);

      /* Exactly the selected byte of src0 is used. */
      EXPECT_EQ(0xffu << (8 * src1_imm[chan]), bits_used);
   }
}
250
/* Unsigned upper bound analysis should look through a bcsel which uses the phi. */
TEST_F(unsigned_upper_bound_test, loop_phi_bcsel)
{
   /*
    * impl main {
    *     block b0:  // preds:
    *     32     %0 = load_const (0x00000000 = 0.000000)
    *     32     %1 = load_const (0x00000002 = 0.000000)
    *     1      %2 = load_const (false)
    *                 // succs: b1
    *     loop {
    *         block b1:  // preds: b0 b1
    *         32     %4 = phi b0: %0 (0x0), b1: %3
    *         32     %3 = bcsel %2 (false), %4, %1 (0x2)
    *                     // succs: b1
    *     }
    *     block b2:  // preds: , succs: b3
    *     block b3:
    * }
    */
   nir_def *zero = nir_imm_int(b, 0);
   nir_def *two = nir_imm_int(b, 2);
   nir_def *cond = nir_imm_false(b);

   /* Create the phi up front (not yet inserted) so the bcsel built inside
    * the loop can reference its def before the phi itself is placed.
    */
   nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
   nir_def_init(&phi->instr, &phi->def, 1, 32);

   nir_push_loop(b);
   nir_def *sel = nir_bcsel(b, cond, &phi->def, two);
   nir_pop_loop(b, NULL);

   /* Wire the phi's sources (0 from the preheader, the bcsel from the loop
    * back-edge), then insert the phi at the top of the loop body, before
    * the bcsel, so the block satisfies NIR's phis-first rule.
    */
   nir_phi_instr_add_src(phi, zero->parent_instr->block, zero);
   nir_phi_instr_add_src(phi, sel->parent_instr->block, sel);
   b->cursor = nir_before_instr(sel->parent_instr);
   nir_builder_instr_insert(b, &phi->instr);

   nir_validate_shader(b->shader, NULL);

   /* Both phi inputs are 0 or 2, so the analysis must report 2 even though
    * one input comes through the bcsel that consumes the phi itself.
    */
   struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
   nir_scalar scalar = nir_get_scalar(&phi->def, 0);
   EXPECT_EQ(nir_unsigned_upper_bound(b->shader, range_ht, scalar, NULL), 2);
   _mesa_hash_table_destroy(range_ht, NULL);
}
294