; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
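;
; A dynamic extractelement lowers to v_movrels_b32 (read a VGPR at a relative
; offset) and a dynamic insertelement lowers to v_movreld_b32 (write a VGPR at
; a relative offset); both take the index from the m0 register. A uniform
; index is simply copied or added into m0; an index that may differ between
; lanes requires a "waterfall" loop over the distinct index values.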

; CHECK-LABEL: {{^}}extract_w_offset:
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}

; XXX: Could do v_or_b32 directly
; CHECK-LABEL: {{^}}extract_w_offset_salu_use_vector:
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_wo_offset:
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
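; The -512 is folded together with that base register's offset into the
; immediate added to m0, so the check matches 0xfffffe.. rather than an exact
; value.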
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
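; The index lives in a VGPR and may be divergent, so a waterfall loop is
; emitted: v_readfirstlane_b32 picks one lane's index, the lanes holding that
; index run the movrel with exec masked to them, and the loop repeats while
; any lanes remain (s_cbranch_execnz).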
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, 0xfffffe{{[0-9a-z]+}}
; CHECK-NEXT: v_movrels_b32_e32 v{{[0-9]}}, v0
; CHECK: s_cbranch_execnz
define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_undef_offset_sgpr:
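; With an undef index no particular lowering is guaranteed, so only the label
; is checked.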
define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK-NEXT: v_movreld_b32
define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_w_offset:
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movreld_b32_e32
define void @insert_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  %2 = extractelement <4 x float> %1, i32 2
  store float %2, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_wo_offset:
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movreld_b32_e32
define void @insert_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  %1 = extractelement <4 x float> %0, i32 2
  store float %1, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, v{{[0-9]}}
define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence.

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, v{{[0-9]}}
define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, 0xfffffe{{[0-9a-z]+}}
; CHECK-NEXT: v_movreld_b32_e32 v0, v{{[0-9]}}
; CHECK: s_cbranch_execnz
define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_inline_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, -{{[0-9]+}}
; CHECK-NEXT: v_movreld_b32_e32 v0, v{{[0-9]}}
; CHECK: s_cbranch_execnz
define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.
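; Here two divergent-index extracts sit in one block, each requiring its own
; waterfall loop, and the inline asm value defined between them must remain
; live across both.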

; CHECK-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; CHECK-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]
; CHECK: s_waitcnt vmcnt(0)

; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
; CHECK: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dword [[MOVREL0]]
; CHECK: buffer_store_dword [[MOVREL1]]
define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; CHECK-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; CHECK-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], s[[S_ELT0]]
; CHECK-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62
; CHECK-DAG: s_waitcnt vmcnt(0)

; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movreld_b32_e32 v[[MOVREL0:[0-9]+]], [[INS0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
; CHECK: v_mov_b32_e32 [[INS1:v[0-9]+]], 63
; CHECK: s_mov_b64 [[MASK]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movreld_b32_e32 v[[MOVREL1:[0-9]+]], [[INS1]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dwordx4 v{{\[}}[[MOVREL0]]:

; CHECK: buffer_store_dword [[INS0]]
define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; CHECK-LABEL: {{^}}extract_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; CHECK-LABEL: {{^}}insert_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:                                              ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  br label %bb7

bb4:                                              ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  br label %bb7

bb7:                                              ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; CHECK-LABEL: {{^}}multi_same_block:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; CHECK-DAG: s_add_i32 m0, [[ARG]], -16
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, [[ZERO]]

; CHECK: s_add_i32 m0, [[ARG]], -14
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; CHECK: s_mov_b32 m0, -1
; CHECK: ds_write_b32
; CHECK: ds_write_b32
; CHECK: s_endpgm
define void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 0.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float 0x3FB99999A0000000, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}

; The largest in-bounds offset selects the last element; the constant part of
; the offset is folded into the movrel source operand rather than added to m0.
; CHECK-LABEL: {{^}}extract_largest_inbounds_offset:
; CHECK: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_mov_b32 m0, [[IDX]]
; CHECK-NEXT: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; The offset puts the index outside of the superregister boundaries, so it is
; clamped to the 1st element.
; CHECK-LABEL: {{^}}extract_out_of_bounds_offset:
; CHECK: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_add_i32 m0, [[IDX]], 4
; CHECK-NEXT: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Test that the or is folded into the base address register instead of
; added to m0.
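; Because the index was shifted left by 2, its low bits are known zero, so
; or-ing in 1 is equivalent to addressing the next register after the base;
; no s_or_b32 on the index should be emitted.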

; CHECK-LABEL: {{^}}extractelement_v4i32_or_index:
; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; CHECK-NOT: [[IDX_SHL]]
; CHECK: s_mov_b32 m0, [[IDX_SHL]]
; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %value = extractelement <4 x i32> %ld, i32 %idx
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insertelement_v4f32_or_index:
; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; CHECK-NOT: [[IDX_SHL]]
; CHECK: s_mov_b32 m0, [[IDX_SHL]]
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }