xref: /aosp_15_r20/external/clang/test/CodeGen/builtins-ppc-vsx.c (revision 67e74705e28f6214e480b399dd47ea732279e315)
// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
#include <altivec.h>
5*67e74705SXin Li 
6*67e74705SXin Li vector bool char vbc = { 0, 1, 0, 1, 0, 1, 0, 1,
7*67e74705SXin Li                          0, 1, 0, 1, 0, 1, 0, 1 };
8*67e74705SXin Li vector signed char vsc = { -8,  9, -10, 11, -12, 13, -14, 15,
9*67e74705SXin Li                            -0,  1,  -2,  3,  -4,  5,  -6,  7};
10*67e74705SXin Li vector unsigned char vuc = { 8,  9, 10, 11, 12, 13, 14, 15,
11*67e74705SXin Li                              0,  1,  2,  3,  4,  5,  6,  7};
12*67e74705SXin Li vector float vf = { -1.5, 2.5, -3.5, 4.5 };
13*67e74705SXin Li vector double vd = { 3.5, -7.5 };
14*67e74705SXin Li vector bool short vbs = { 0, 1, 0, 1, 0, 1, 0, 1 };
15*67e74705SXin Li vector signed short vss = { -1, 2, -3, 4, -5, 6, -7, 8 };
16*67e74705SXin Li vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
17*67e74705SXin Li vector bool int vbi = { 0, 1, 0, 1 };
18*67e74705SXin Li vector signed int vsi = { -1, 2, -3, 4 };
19*67e74705SXin Li vector unsigned int vui = { 0, 1, 2, 3 };
20*67e74705SXin Li vector bool long long vbll = { 1, 0 };
21*67e74705SXin Li vector signed long long vsll = { 255LL, -937LL };
22*67e74705SXin Li vector unsigned long long vull = { 1447LL, 2894LL };
23*67e74705SXin Li double d = 23.4;
24*67e74705SXin Li float af[4] = {23.4f, 56.7f, 89.0f, 12.3f};
25*67e74705SXin Li double ad[2] = {23.4, 56.7};
26*67e74705SXin Li signed char asc[16] = { -8,  9, -10, 11, -12, 13, -14, 15,
27*67e74705SXin Li                         -0,  1,  -2,  3,  -4,  5,  -6,  7};
28*67e74705SXin Li unsigned char auc[16] = { 8,  9, 10, 11, 12, 13, 14, 15,
29*67e74705SXin Li                           0,  1,  2,  3,  4,  5,  6,  7};
30*67e74705SXin Li signed short ass[8] = { -1, 2, -3, 4, -5, 6, -7, 8 };
31*67e74705SXin Li unsigned short aus[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
32*67e74705SXin Li signed int asi[4] = { -1, 2, -3, 4 };
33*67e74705SXin Li unsigned int aui[4] = { 0, 1, 2, 3 };
34*67e74705SXin Li signed long asl[2] = { -1L, 2L };
35*67e74705SXin Li unsigned long aul[2] = { 1L, 2L };
36*67e74705SXin Li 
37*67e74705SXin Li vector float res_vf;
38*67e74705SXin Li vector double res_vd;
39*67e74705SXin Li vector bool char res_vbc;
40*67e74705SXin Li vector signed char res_vsc;
41*67e74705SXin Li vector unsigned char res_vuc;
42*67e74705SXin Li vector bool short res_vbs;
43*67e74705SXin Li vector signed short res_vss;
44*67e74705SXin Li vector unsigned short res_vus;
45*67e74705SXin Li vector bool int res_vbi;
46*67e74705SXin Li vector signed int res_vsi;
47*67e74705SXin Li vector unsigned int res_vui;
48*67e74705SXin Li vector bool long long res_vbll;
49*67e74705SXin Li vector signed long long res_vsll;
50*67e74705SXin Li vector unsigned long long res_vull;
51*67e74705SXin Li 
52*67e74705SXin Li double res_d;
53*67e74705SXin Li float res_af[4];
54*67e74705SXin Li double res_ad[2];
55*67e74705SXin Li signed char res_asc[16];
56*67e74705SXin Li unsigned char res_auc[16];
57*67e74705SXin Li signed short res_ass[8];
58*67e74705SXin Li unsigned short res_aus[8];
59*67e74705SXin Li signed int res_asi[4];
60*67e74705SXin Li unsigned int res_aui[4];
61*67e74705SXin Li 
/* Empty marker function: calls to dummy() are interleaved between test
   expressions so that each group of CHECK directives is anchored by a
   `call void @dummy()` line and matches cannot bleed across tests. */
void dummy() { }
63*67e74705SXin Li 
test1()64*67e74705SXin Li void test1() {
65*67e74705SXin Li // CHECK-LABEL: define void @test1
66*67e74705SXin Li // CHECK-LE-LABEL: define void @test1
67*67e74705SXin Li 
68*67e74705SXin Li   res_vf = vec_abs(vf);
69*67e74705SXin Li // CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})
70*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})
71*67e74705SXin Li 
72*67e74705SXin Li   dummy();
73*67e74705SXin Li // CHECK: call void @dummy()
74*67e74705SXin Li // CHECK-LE: call void @dummy()
75*67e74705SXin Li 
76*67e74705SXin Li   res_vd = vec_add(vd, vd);
77*67e74705SXin Li // CHECK: fadd <2 x double>
78*67e74705SXin Li // CHECK-LE: fadd <2 x double>
79*67e74705SXin Li 
80*67e74705SXin Li   res_vd = vec_and(vbll, vd);
81*67e74705SXin Li // CHECK: and <2 x i64>
82*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
83*67e74705SXin Li // CHECK-LE: and <2 x i64>
84*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
85*67e74705SXin Li 
86*67e74705SXin Li   res_vd = vec_and(vd, vbll);
87*67e74705SXin Li // CHECK: and <2 x i64>
88*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
89*67e74705SXin Li // CHECK-LE: and <2 x i64>
90*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
91*67e74705SXin Li 
92*67e74705SXin Li   res_vd = vec_and(vd, vd);
93*67e74705SXin Li // CHECK: and <2 x i64>
94*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
95*67e74705SXin Li // CHECK-LE: and <2 x i64>
96*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
97*67e74705SXin Li 
98*67e74705SXin Li   dummy();
99*67e74705SXin Li // CHECK: call void @dummy()
100*67e74705SXin Li // CHECK-LE: call void @dummy()
101*67e74705SXin Li 
102*67e74705SXin Li   res_vd = vec_andc(vbll, vd);
103*67e74705SXin Li // CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
104*67e74705SXin Li // CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
105*67e74705SXin Li // CHECK: and <2 x i64>
106*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
107*67e74705SXin Li // CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
108*67e74705SXin Li // CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
109*67e74705SXin Li // CHECK-LE: and <2 x i64>
110*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
111*67e74705SXin Li 
112*67e74705SXin Li   dummy();
113*67e74705SXin Li // CHECK: call void @dummy()
114*67e74705SXin Li // CHECK-LE: call void @dummy()
115*67e74705SXin Li 
116*67e74705SXin Li   res_vd = vec_andc(vd, vbll);
117*67e74705SXin Li // CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
118*67e74705SXin Li // CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
119*67e74705SXin Li // CHECK: and <2 x i64>
120*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
121*67e74705SXin Li // CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
122*67e74705SXin Li // CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
123*67e74705SXin Li // CHECK-LE: and <2 x i64>
124*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
125*67e74705SXin Li 
126*67e74705SXin Li   dummy();
127*67e74705SXin Li // CHECK: call void @dummy()
128*67e74705SXin Li 
129*67e74705SXin Li   res_vd = vec_andc(vd, vd);
130*67e74705SXin Li // CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
131*67e74705SXin Li // CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
132*67e74705SXin Li // CHECK: and <2 x i64>
133*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
134*67e74705SXin Li 
135*67e74705SXin Li   dummy();
136*67e74705SXin Li // CHECK: call void @dummy()
137*67e74705SXin Li // CHECK-LE: call void @dummy()
138*67e74705SXin Li 
139*67e74705SXin Li   res_vd = vec_ceil(vd);
140*67e74705SXin Li // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
141*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
142*67e74705SXin Li 
143*67e74705SXin Li   res_vf = vec_ceil(vf);
144*67e74705SXin Li // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
145*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
146*67e74705SXin Li 
147*67e74705SXin Li   res_vbll = vec_cmpeq(vd, vd);
148*67e74705SXin Li // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
149*67e74705SXin Li // CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
150*67e74705SXin Li 
151*67e74705SXin Li   res_vbi = vec_cmpeq(vf, vf);
152*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
153*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
154*67e74705SXin Li 
155*67e74705SXin Li   res_vbll = vec_cmpge(vd, vd);
156*67e74705SXin Li // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
157*67e74705SXin Li // CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
158*67e74705SXin Li 
159*67e74705SXin Li   res_vbi = vec_cmpge(vf, vf);
160*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
161*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
162*67e74705SXin Li 
163*67e74705SXin Li   res_vbll = vec_cmpgt(vd, vd);
164*67e74705SXin Li // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
165*67e74705SXin Li // CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
166*67e74705SXin Li 
167*67e74705SXin Li   res_vbi = vec_cmpgt(vf, vf);
168*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
169*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
170*67e74705SXin Li 
171*67e74705SXin Li   res_vbll = vec_cmple(vd, vd);
172*67e74705SXin Li // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
173*67e74705SXin Li // CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
174*67e74705SXin Li 
175*67e74705SXin Li   res_vbi = vec_cmple(vf, vf);
176*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
177*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
178*67e74705SXin Li 
179*67e74705SXin Li   res_vbll = vec_cmplt(vd, vd);
180*67e74705SXin Li // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
181*67e74705SXin Li // CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
182*67e74705SXin Li 
183*67e74705SXin Li   res_vbi = vec_cmplt(vf, vf);
184*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
185*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
186*67e74705SXin Li 
187*67e74705SXin Li   /* vec_cpsgn */
188*67e74705SXin Li   res_vf = vec_cpsgn(vf, vf);
189*67e74705SXin Li // CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
190*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
191*67e74705SXin Li 
192*67e74705SXin Li   res_vd = vec_cpsgn(vd, vd);
193*67e74705SXin Li // CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
194*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
195*67e74705SXin Li 
196*67e74705SXin Li   /* vec_div */
197*67e74705SXin Li   res_vsll = vec_div(vsll, vsll);
198*67e74705SXin Li // CHECK: sdiv <2 x i64>
199*67e74705SXin Li // CHECK-LE: sdiv <2 x i64>
200*67e74705SXin Li 
201*67e74705SXin Li   res_vull = vec_div(vull, vull);
202*67e74705SXin Li // CHECK: udiv <2 x i64>
203*67e74705SXin Li // CHECK-LE: udiv <2 x i64>
204*67e74705SXin Li 
205*67e74705SXin Li   res_vf = vec_div(vf, vf);
206*67e74705SXin Li // CHECK: fdiv <4 x float>
207*67e74705SXin Li // CHECK-LE: fdiv <4 x float>
208*67e74705SXin Li 
209*67e74705SXin Li   res_vd = vec_div(vd, vd);
210*67e74705SXin Li // CHECK: fdiv <2 x double>
211*67e74705SXin Li // CHECK-LE: fdiv <2 x double>
212*67e74705SXin Li 
213*67e74705SXin Li   /* vec_max */
214*67e74705SXin Li   res_vf = vec_max(vf, vf);
215*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvmaxsp
216*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvmaxsp
217*67e74705SXin Li 
218*67e74705SXin Li   res_vd = vec_max(vd, vd);
219*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvmaxdp
220*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvmaxdp
221*67e74705SXin Li 
222*67e74705SXin Li   res_vf = vec_vmaxfp(vf, vf);
223*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvmaxsp
224*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvmaxsp
225*67e74705SXin Li 
226*67e74705SXin Li   /* vec_min */
227*67e74705SXin Li   res_vf = vec_min(vf, vf);
228*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvminsp
229*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvminsp
230*67e74705SXin Li 
231*67e74705SXin Li   res_vd = vec_min(vd, vd);
232*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvmindp
233*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvmindp
234*67e74705SXin Li 
235*67e74705SXin Li   res_vf = vec_vminfp(vf, vf);
236*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xvminsp
237*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xvminsp
238*67e74705SXin Li 
239*67e74705SXin Li   res_d = __builtin_vsx_xsmaxdp(d, d);
240*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xsmaxdp
241*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xsmaxdp
242*67e74705SXin Li 
243*67e74705SXin Li   res_d = __builtin_vsx_xsmindp(d, d);
244*67e74705SXin Li // CHECK: @llvm.ppc.vsx.xsmindp
245*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.xsmindp
246*67e74705SXin Li 
247*67e74705SXin Li   /* vec_perm */
248*67e74705SXin Li   res_vsll = vec_perm(vsll, vsll, vuc);
249*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
250*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
251*67e74705SXin Li 
252*67e74705SXin Li   res_vull = vec_perm(vull, vull, vuc);
253*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
254*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
255*67e74705SXin Li 
256*67e74705SXin Li   res_vbll = vec_perm(vbll, vbll, vuc);
257*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
258*67e74705SXin Li // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
259*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
260*67e74705SXin Li // CHECK-LE: xor <16 x i8>
261*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
262*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
263*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
264*67e74705SXin Li 
265*67e74705SXin Li   res_vf = vec_round(vf);
266*67e74705SXin Li // CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
267*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>
268*67e74705SXin Li 
269*67e74705SXin Li   res_vd = vec_round(vd);
270*67e74705SXin Li // CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
271*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>
272*67e74705SXin Li 
273*67e74705SXin Li   res_vd = vec_perm(vd, vd, vuc);
274*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
275*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
276*67e74705SXin Li 
277*67e74705SXin Li   res_vd = vec_splat(vd, 1);
278*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
279*67e74705SXin Li // CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
280*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
281*67e74705SXin Li // CHECK-LE: xor <16 x i8>
282*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
283*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
284*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
285*67e74705SXin Li 
286*67e74705SXin Li   res_vbll = vec_splat(vbll, 1);
287*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
288*67e74705SXin Li // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
289*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
290*67e74705SXin Li // CHECK-LE: xor <16 x i8>
291*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
292*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
293*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
294*67e74705SXin Li 
295*67e74705SXin Li   res_vsll =  vec_splat(vsll, 1);
296*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
297*67e74705SXin Li // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
298*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
299*67e74705SXin Li // CHECK-LE: xor <16 x i8>
300*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
301*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
302*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
303*67e74705SXin Li 
304*67e74705SXin Li   res_vull =  vec_splat(vull, 1);
305*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
306*67e74705SXin Li // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
307*67e74705SXin Li // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
308*67e74705SXin Li // CHECK-LE: xor <16 x i8>
309*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
310*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
311*67e74705SXin Li // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
312*67e74705SXin Li 
313*67e74705SXin Li   res_vsi = vec_pack(vsll, vsll);
314*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
315*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
316*67e74705SXin Li 
317*67e74705SXin Li   res_vui = vec_pack(vull, vull);
318*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
319*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
320*67e74705SXin Li 
321*67e74705SXin Li   res_vbi = vec_pack(vbll, vbll);
322*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
323*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
324*67e74705SXin Li 
325*67e74705SXin Li   res_vsll = vec_vperm(vsll, vsll, vuc);
326*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
327*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
328*67e74705SXin Li 
329*67e74705SXin Li   res_vull = vec_vperm(vull, vull, vuc);
330*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
331*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
332*67e74705SXin Li 
333*67e74705SXin Li   res_vd = vec_vperm(vd, vd, vuc);
334*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
335*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
336*67e74705SXin Li 
337*67e74705SXin Li   /* vec_vsx_ld */
338*67e74705SXin Li 
339*67e74705SXin Li   res_vbi = vec_vsx_ld(0, &vbi);
340*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
341*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
342*67e74705SXin Li 
343*67e74705SXin Li   res_vsi = vec_vsx_ld(0, &vsi);
344*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
345*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
346*67e74705SXin Li 
347*67e74705SXin Li   res_vsi = vec_vsx_ld(0, asi);
348*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
349*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
350*67e74705SXin Li 
351*67e74705SXin Li   res_vui = vec_vsx_ld(0, &vui);
352*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
353*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
354*67e74705SXin Li 
355*67e74705SXin Li   res_vui = vec_vsx_ld(0, aui);
356*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
357*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
358*67e74705SXin Li 
359*67e74705SXin Li   res_vf = vec_vsx_ld (0, &vf);
360*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
361*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
362*67e74705SXin Li 
363*67e74705SXin Li   res_vf = vec_vsx_ld (0, af);
364*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
365*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
366*67e74705SXin Li 
367*67e74705SXin Li   res_vsll = vec_vsx_ld(0, &vsll);
368*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvd2x
369*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvd2x
370*67e74705SXin Li 
371*67e74705SXin Li   res_vull = vec_vsx_ld(0, &vull);
372*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvd2x
373*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvd2x
374*67e74705SXin Li 
375*67e74705SXin Li   res_vd = vec_vsx_ld(0, &vd);
376*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvd2x
377*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvd2x
378*67e74705SXin Li 
379*67e74705SXin Li   res_vd = vec_vsx_ld(0, ad);
380*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvd2x
381*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvd2x
382*67e74705SXin Li 
383*67e74705SXin Li   res_vbs = vec_vsx_ld(0, &vbs);
384*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
385*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
386*67e74705SXin Li 
387*67e74705SXin Li   res_vss = vec_vsx_ld(0, &vss);
388*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
389*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
390*67e74705SXin Li 
391*67e74705SXin Li   res_vss = vec_vsx_ld(0, ass);
392*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
393*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
394*67e74705SXin Li 
395*67e74705SXin Li   res_vus = vec_vsx_ld(0, &vus);
396*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
397*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
398*67e74705SXin Li 
399*67e74705SXin Li   res_vus = vec_vsx_ld(0, aus);
400*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
401*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
402*67e74705SXin Li 
403*67e74705SXin Li   res_vbc = vec_vsx_ld(0, &vbc);
404*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
405*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
406*67e74705SXin Li 
407*67e74705SXin Li   res_vsc = vec_vsx_ld(0, &vsc);
408*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
409*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
410*67e74705SXin Li 
411*67e74705SXin Li   res_vuc = vec_vsx_ld(0, &vuc);
412*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
413*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
414*67e74705SXin Li 
415*67e74705SXin Li   res_vsc = vec_vsx_ld(0, asc);
416*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
417*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
418*67e74705SXin Li 
419*67e74705SXin Li   res_vuc = vec_vsx_ld(0, auc);
420*67e74705SXin Li // CHECK: @llvm.ppc.vsx.lxvw4x
421*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.lxvw4x
422*67e74705SXin Li 
423*67e74705SXin Li   /* vec_vsx_st */
424*67e74705SXin Li 
425*67e74705SXin Li   vec_vsx_st(vbi, 0, &res_vbi);
426*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
427*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
428*67e74705SXin Li 
429*67e74705SXin Li   vec_vsx_st(vbi, 0, res_aui);
430*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
431*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
432*67e74705SXin Li 
433*67e74705SXin Li   vec_vsx_st(vbi, 0, res_asi);
434*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
435*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
436*67e74705SXin Li 
437*67e74705SXin Li   vec_vsx_st(vsi, 0, &res_vsi);
438*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
439*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
440*67e74705SXin Li 
441*67e74705SXin Li   vec_vsx_st(vsi, 0, res_asi);
442*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
443*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
444*67e74705SXin Li 
445*67e74705SXin Li   vec_vsx_st(vui, 0, &res_vui);
446*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
447*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
448*67e74705SXin Li 
449*67e74705SXin Li   vec_vsx_st(vui, 0, res_aui);
450*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
451*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
452*67e74705SXin Li 
453*67e74705SXin Li   vec_vsx_st(vf, 0, &res_vf);
454*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
455*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
456*67e74705SXin Li 
457*67e74705SXin Li   vec_vsx_st(vf, 0, res_af);
458*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
459*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
460*67e74705SXin Li 
461*67e74705SXin Li   vec_vsx_st(vsll, 0, &res_vsll);
462*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvd2x
463*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvd2x
464*67e74705SXin Li 
465*67e74705SXin Li   vec_vsx_st(vull, 0, &res_vull);
466*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvd2x
467*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvd2x
468*67e74705SXin Li 
469*67e74705SXin Li   vec_vsx_st(vd, 0, &res_vd);
470*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvd2x
471*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvd2x
472*67e74705SXin Li 
473*67e74705SXin Li   vec_vsx_st(vd, 0, res_ad);
474*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvd2x
475*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvd2x
476*67e74705SXin Li 
477*67e74705SXin Li   vec_vsx_st(vbs, 0, &res_vbs);
478*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
479*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
480*67e74705SXin Li 
481*67e74705SXin Li   vec_vsx_st(vbs, 0, res_aus);
482*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
483*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
484*67e74705SXin Li 
485*67e74705SXin Li   vec_vsx_st(vbs, 0, res_ass);
486*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
487*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
488*67e74705SXin Li 
489*67e74705SXin Li   vec_vsx_st(vss, 0, &res_vss);
490*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
491*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
492*67e74705SXin Li 
493*67e74705SXin Li   vec_vsx_st(vss, 0, res_ass);
494*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
495*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
496*67e74705SXin Li 
497*67e74705SXin Li   vec_vsx_st(vus, 0, &res_vus);
498*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
499*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
500*67e74705SXin Li 
501*67e74705SXin Li   vec_vsx_st(vus, 0, res_aus);
502*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
503*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
504*67e74705SXin Li 
505*67e74705SXin Li   vec_vsx_st(vsc, 0, &res_vsc);
506*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
507*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
508*67e74705SXin Li 
509*67e74705SXin Li   vec_vsx_st(vsc, 0, res_asc);
510*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
511*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
512*67e74705SXin Li 
513*67e74705SXin Li   vec_vsx_st(vuc, 0, &res_vuc);
514*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
515*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
516*67e74705SXin Li 
517*67e74705SXin Li   vec_vsx_st(vuc, 0, res_auc);
518*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
519*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
520*67e74705SXin Li 
521*67e74705SXin Li   vec_vsx_st(vbc, 0, &res_vbc);
522*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
523*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
524*67e74705SXin Li 
525*67e74705SXin Li   vec_vsx_st(vbc, 0, res_asc);
526*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
527*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
528*67e74705SXin Li 
529*67e74705SXin Li   vec_vsx_st(vbc, 0, res_auc);
530*67e74705SXin Li // CHECK: @llvm.ppc.vsx.stxvw4x
531*67e74705SXin Li // CHECK-LE: @llvm.ppc.vsx.stxvw4x
532*67e74705SXin Li 
533*67e74705SXin Li   /* vec_and */
534*67e74705SXin Li   res_vsll = vec_and(vsll, vsll);
535*67e74705SXin Li // CHECK: and <2 x i64>
536*67e74705SXin Li // CHECK-LE: and <2 x i64>
537*67e74705SXin Li 
538*67e74705SXin Li   res_vsll = vec_and(vbll, vsll);
539*67e74705SXin Li // CHECK: and <2 x i64>
540*67e74705SXin Li // CHECK-LE: and <2 x i64>
541*67e74705SXin Li 
542*67e74705SXin Li   res_vsll = vec_and(vsll, vbll);
543*67e74705SXin Li // CHECK: and <2 x i64>
544*67e74705SXin Li // CHECK-LE: and <2 x i64>
545*67e74705SXin Li 
546*67e74705SXin Li   res_vull = vec_and(vull, vull);
547*67e74705SXin Li // CHECK: and <2 x i64>
548*67e74705SXin Li // CHECK-LE: and <2 x i64>
549*67e74705SXin Li 
550*67e74705SXin Li   res_vull = vec_and(vbll, vull);
551*67e74705SXin Li // CHECK: and <2 x i64>
552*67e74705SXin Li // CHECK-LE: and <2 x i64>
553*67e74705SXin Li 
554*67e74705SXin Li   res_vull = vec_and(vull, vbll);
555*67e74705SXin Li // CHECK: and <2 x i64>
556*67e74705SXin Li // CHECK-LE: and <2 x i64>
557*67e74705SXin Li 
558*67e74705SXin Li   res_vbll = vec_and(vbll, vbll);
559*67e74705SXin Li // CHECK: and <2 x i64>
560*67e74705SXin Li // CHECK-LE: and <2 x i64>
561*67e74705SXin Li 
562*67e74705SXin Li   /* vec_vand */
563*67e74705SXin Li   res_vsll = vec_vand(vsll, vsll);
564*67e74705SXin Li // CHECK: and <2 x i64>
565*67e74705SXin Li // CHECK-LE: and <2 x i64>
566*67e74705SXin Li 
567*67e74705SXin Li   res_vsll = vec_vand(vbll, vsll);
568*67e74705SXin Li // CHECK: and <2 x i64>
569*67e74705SXin Li // CHECK-LE: and <2 x i64>
570*67e74705SXin Li 
571*67e74705SXin Li   res_vsll = vec_vand(vsll, vbll);
572*67e74705SXin Li // CHECK: and <2 x i64>
573*67e74705SXin Li // CHECK-LE: and <2 x i64>
574*67e74705SXin Li 
575*67e74705SXin Li   res_vull = vec_vand(vull, vull);
576*67e74705SXin Li // CHECK: and <2 x i64>
577*67e74705SXin Li // CHECK-LE: and <2 x i64>
578*67e74705SXin Li 
579*67e74705SXin Li   res_vull = vec_vand(vbll, vull);
580*67e74705SXin Li // CHECK: and <2 x i64>
581*67e74705SXin Li // CHECK-LE: and <2 x i64>
582*67e74705SXin Li 
583*67e74705SXin Li   res_vull = vec_vand(vull, vbll);
584*67e74705SXin Li // CHECK: and <2 x i64>
585*67e74705SXin Li // CHECK-LE: and <2 x i64>
586*67e74705SXin Li 
587*67e74705SXin Li   res_vbll = vec_vand(vbll, vbll);
588*67e74705SXin Li // CHECK: and <2 x i64>
589*67e74705SXin Li // CHECK-LE: and <2 x i64>
590*67e74705SXin Li 
591*67e74705SXin Li   /* vec_andc */
592*67e74705SXin Li   res_vsll = vec_andc(vsll, vsll);
593*67e74705SXin Li // CHECK: xor <2 x i64>
594*67e74705SXin Li // CHECK: and <2 x i64>
595*67e74705SXin Li // CHECK-LE: xor <2 x i64>
596*67e74705SXin Li // CHECK-LE: and <2 x i64>
597*67e74705SXin Li 
598*67e74705SXin Li   res_vsll = vec_andc(vbll, vsll);
599*67e74705SXin Li // CHECK: xor <2 x i64>
600*67e74705SXin Li // CHECK: and <2 x i64>
601*67e74705SXin Li // CHECK-LE: xor <2 x i64>
602*67e74705SXin Li // CHECK-LE: and <2 x i64>
603*67e74705SXin Li 
604*67e74705SXin Li   res_vsll = vec_andc(vsll, vbll);
605*67e74705SXin Li // CHECK: xor <2 x i64>
606*67e74705SXin Li // CHECK: and <2 x i64>
607*67e74705SXin Li // CHECK-LE: xor <2 x i64>
608*67e74705SXin Li // CHECK-LE: and <2 x i64>
609*67e74705SXin Li 
610*67e74705SXin Li   res_vull = vec_andc(vull, vull);
611*67e74705SXin Li // CHECK: xor <2 x i64>
612*67e74705SXin Li // CHECK: and <2 x i64>
613*67e74705SXin Li // CHECK-LE: xor <2 x i64>
614*67e74705SXin Li // CHECK-LE: and <2 x i64>
615*67e74705SXin Li 
616*67e74705SXin Li   res_vull = vec_andc(vbll, vull);
617*67e74705SXin Li // CHECK: xor <2 x i64>
618*67e74705SXin Li // CHECK: and <2 x i64>
619*67e74705SXin Li // CHECK-LE: xor <2 x i64>
620*67e74705SXin Li // CHECK-LE: and <2 x i64>
621*67e74705SXin Li 
622*67e74705SXin Li   res_vull = vec_andc(vull, vbll);
623*67e74705SXin Li // CHECK: xor <2 x i64>
624*67e74705SXin Li // CHECK: and <2 x i64>
625*67e74705SXin Li // CHECK-LE: xor <2 x i64>
626*67e74705SXin Li // CHECK-LE: and <2 x i64>
627*67e74705SXin Li 
628*67e74705SXin Li   res_vbll = vec_andc(vbll, vbll);
629*67e74705SXin Li // CHECK: xor <2 x i64>
630*67e74705SXin Li // CHECK: and <2 x i64>
631*67e74705SXin Li // CHECK-LE: xor <2 x i64>
632*67e74705SXin Li // CHECK-LE: and <2 x i64>
633*67e74705SXin Li 
634*67e74705SXin Li   res_vf = vec_floor(vf);
635*67e74705SXin Li // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
636*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
637*67e74705SXin Li 
638*67e74705SXin Li   res_vd = vec_floor(vd);
639*67e74705SXin Li // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
640*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
641*67e74705SXin Li 
642*67e74705SXin Li   res_vf = vec_madd(vf, vf, vf);
643*67e74705SXin Li // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
644*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
645*67e74705SXin Li 
646*67e74705SXin Li   res_vd = vec_madd(vd, vd, vd);
647*67e74705SXin Li // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
648*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
649*67e74705SXin Li 
650*67e74705SXin Li   /* vec_mergeh */
651*67e74705SXin Li   res_vsll = vec_mergeh(vsll, vsll);
652*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
653*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
654*67e74705SXin Li 
655*67e74705SXin Li   res_vsll = vec_mergeh(vsll, vbll);
656*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
657*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
658*67e74705SXin Li 
659*67e74705SXin Li   res_vsll = vec_mergeh(vbll, vsll);
660*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
661*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
662*67e74705SXin Li 
663*67e74705SXin Li   res_vull = vec_mergeh(vull, vull);
664*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
665*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
666*67e74705SXin Li 
667*67e74705SXin Li   res_vull = vec_mergeh(vull, vbll);
668*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
669*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
670*67e74705SXin Li 
671*67e74705SXin Li   res_vull = vec_mergeh(vbll, vull);
672*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
673*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
674*67e74705SXin Li 
675*67e74705SXin Li   /* vec_mergel */
676*67e74705SXin Li   res_vsll = vec_mergel(vsll, vsll);
677*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
678*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
679*67e74705SXin Li 
680*67e74705SXin Li   res_vsll = vec_mergel(vsll, vbll);
681*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
682*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
683*67e74705SXin Li 
684*67e74705SXin Li   res_vsll = vec_mergel(vbll, vsll);
685*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
686*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
687*67e74705SXin Li 
688*67e74705SXin Li   res_vull = vec_mergel(vull, vull);
689*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
690*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
691*67e74705SXin Li 
692*67e74705SXin Li   res_vull = vec_mergel(vull, vbll);
693*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
694*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
695*67e74705SXin Li 
696*67e74705SXin Li   res_vull = vec_mergel(vbll, vull);
697*67e74705SXin Li // CHECK: @llvm.ppc.altivec.vperm
698*67e74705SXin Li // CHECK-LE: @llvm.ppc.altivec.vperm
699*67e74705SXin Li 
700*67e74705SXin Li   /* vec_msub */
701*67e74705SXin Li   res_vf = vec_msub(vf, vf, vf);
702*67e74705SXin Li // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
703*67e74705SXin Li // CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
704*67e74705SXin Li // CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
705*67e74705SXin Li // CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
706*67e74705SXin Li 
707*67e74705SXin Li   res_vd = vec_msub(vd, vd, vd);
708*67e74705SXin Li // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
709*67e74705SXin Li // CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
710*67e74705SXin Li // CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
711*67e74705SXin Li // CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
712*67e74705SXin Li 
713*67e74705SXin Li   res_vsll = vec_mul(vsll, vsll);
714*67e74705SXin Li // CHECK: mul <2 x i64>
715*67e74705SXin Li // CHECK-LE: mul <2 x i64>
716*67e74705SXin Li 
717*67e74705SXin Li   res_vull = vec_mul(vull, vull);
718*67e74705SXin Li // CHECK: mul <2 x i64>
719*67e74705SXin Li // CHECK-LE: mul <2 x i64>
720*67e74705SXin Li 
721*67e74705SXin Li   res_vf = vec_mul(vf, vf);
722*67e74705SXin Li // CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
723*67e74705SXin Li // CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
724*67e74705SXin Li 
725*67e74705SXin Li   res_vd = vec_mul(vd, vd);
726*67e74705SXin Li // CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
727*67e74705SXin Li // CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
728*67e74705SXin Li 
729*67e74705SXin Li   res_vf = vec_nearbyint(vf);
730*67e74705SXin Li // CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
731*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
732*67e74705SXin Li 
733*67e74705SXin Li   res_vd = vec_nearbyint(vd);
734*67e74705SXin Li // CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
735*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
736*67e74705SXin Li 
737*67e74705SXin Li   res_vf = vec_nmadd(vf, vf, vf);
738*67e74705SXin Li // CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
739*67e74705SXin Li // CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
740*67e74705SXin Li // CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
741*67e74705SXin Li // CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
742*67e74705SXin Li 
743*67e74705SXin Li   res_vd = vec_nmadd(vd, vd, vd);
744*67e74705SXin Li // CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
745*67e74705SXin Li // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
746*67e74705SXin Li // CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
747*67e74705SXin Li // CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
748*67e74705SXin Li 
749*67e74705SXin Li   res_vf = vec_nmsub(vf, vf, vf);
750*67e74705SXin Li // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
751*67e74705SXin Li // CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
752*67e74705SXin Li // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
753*67e74705SXin Li // CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
754*67e74705SXin Li // CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
755*67e74705SXin Li // CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
756*67e74705SXin Li 
757*67e74705SXin Li   res_vd = vec_nmsub(vd, vd, vd);
758*67e74705SXin Li // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
759*67e74705SXin Li // CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
760*67e74705SXin Li // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
761*67e74705SXin Li // CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
762*67e74705SXin Li // CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
763*67e74705SXin Li // CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
764*67e74705SXin Li 
765*67e74705SXin Li   /* vec_nor */
766*67e74705SXin Li   res_vsll = vec_nor(vsll, vsll);
767*67e74705SXin Li // CHECK: or <2 x i64>
768*67e74705SXin Li // CHECK: xor <2 x i64>
769*67e74705SXin Li // CHECK-LE: or <2 x i64>
770*67e74705SXin Li // CHECK-LE: xor <2 x i64>
771*67e74705SXin Li 
772*67e74705SXin Li   res_vull = vec_nor(vull, vull);
773*67e74705SXin Li // CHECK: or <2 x i64>
774*67e74705SXin Li // CHECK: xor <2 x i64>
775*67e74705SXin Li // CHECK-LE: or <2 x i64>
776*67e74705SXin Li // CHECK-LE: xor <2 x i64>
777*67e74705SXin Li 
778*67e74705SXin Li   res_vull = vec_nor(vbll, vbll);
779*67e74705SXin Li // CHECK: or <2 x i64>
780*67e74705SXin Li // CHECK: xor <2 x i64>
781*67e74705SXin Li // CHECK-LE: or <2 x i64>
782*67e74705SXin Li // CHECK-LE: xor <2 x i64>
783*67e74705SXin Li 
784*67e74705SXin Li   res_vd = vec_nor(vd, vd);
785*67e74705SXin Li // CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
786*67e74705SXin Li // CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
787*67e74705SXin Li // CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
788*67e74705SXin Li // CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
789*67e74705SXin Li // CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
790*67e74705SXin Li // CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
791*67e74705SXin Li 
792*67e74705SXin Li   /* vec_or */
793*67e74705SXin Li   res_vsll = vec_or(vsll, vsll);
794*67e74705SXin Li // CHECK: or <2 x i64>
795*67e74705SXin Li // CHECK-LE: or <2 x i64>
796*67e74705SXin Li 
797*67e74705SXin Li   res_vsll = vec_or(vbll, vsll);
798*67e74705SXin Li // CHECK: or <2 x i64>
799*67e74705SXin Li // CHECK-LE: or <2 x i64>
800*67e74705SXin Li 
801*67e74705SXin Li   res_vsll = vec_or(vsll, vbll);
802*67e74705SXin Li // CHECK: or <2 x i64>
803*67e74705SXin Li // CHECK-LE: or <2 x i64>
804*67e74705SXin Li 
805*67e74705SXin Li   res_vull = vec_or(vull, vull);
806*67e74705SXin Li // CHECK: or <2 x i64>
807*67e74705SXin Li // CHECK-LE: or <2 x i64>
808*67e74705SXin Li 
809*67e74705SXin Li   res_vull = vec_or(vbll, vull);
810*67e74705SXin Li // CHECK: or <2 x i64>
811*67e74705SXin Li // CHECK-LE: or <2 x i64>
812*67e74705SXin Li 
813*67e74705SXin Li   res_vull = vec_or(vull, vbll);
814*67e74705SXin Li // CHECK: or <2 x i64>
815*67e74705SXin Li // CHECK-LE: or <2 x i64>
816*67e74705SXin Li 
817*67e74705SXin Li   res_vbll = vec_or(vbll, vbll);
818*67e74705SXin Li // CHECK: or <2 x i64>
819*67e74705SXin Li // CHECK-LE: or <2 x i64>
820*67e74705SXin Li 
821*67e74705SXin Li   res_vd = vec_or(vd, vd);
822*67e74705SXin Li // CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
823*67e74705SXin Li // CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
824*67e74705SXin Li // CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
825*67e74705SXin Li // CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
826*67e74705SXin Li 
827*67e74705SXin Li   res_vd = vec_or(vbll, vd);
828*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
829*67e74705SXin Li // CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
830*67e74705SXin Li // CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
831*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
832*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
833*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
834*67e74705SXin Li 
835*67e74705SXin Li   res_vd = vec_or(vd, vbll);
836*67e74705SXin Li // CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
837*67e74705SXin Li // CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
838*67e74705SXin Li // CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
839*67e74705SXin Li // CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
840*67e74705SXin Li // CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
841*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
842*67e74705SXin Li 
843*67e74705SXin Li   res_vf = vec_re(vf);
844*67e74705SXin Li // CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
845*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
846*67e74705SXin Li 
847*67e74705SXin Li   res_vd = vec_re(vd);
848*67e74705SXin Li // CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
849*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
850*67e74705SXin Li 
851*67e74705SXin Li   res_vf = vec_rint(vf);
852*67e74705SXin Li // CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
853*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
854*67e74705SXin Li 
855*67e74705SXin Li   res_vd = vec_rint(vd);
856*67e74705SXin Li // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
857*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
858*67e74705SXin Li 
859*67e74705SXin Li   res_vf = vec_rsqrte(vf);
860*67e74705SXin Li // CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
861*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
862*67e74705SXin Li 
863*67e74705SXin Li   res_vd = vec_rsqrte(vd);
864*67e74705SXin Li // CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
865*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
866*67e74705SXin Li 
867*67e74705SXin Li   dummy();
868*67e74705SXin Li // CHECK: call void @dummy()
869*67e74705SXin Li // CHECK-LE: call void @dummy()
870*67e74705SXin Li 
871*67e74705SXin Li   res_vf = vec_sel(vd, vd, vbll);
872*67e74705SXin Li // CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
873*67e74705SXin Li // CHECK: and <2 x i64> %{{[0-9]+}},
874*67e74705SXin Li // CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
875*67e74705SXin Li // CHECK: or <2 x i64>
876*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
877*67e74705SXin Li // CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
878*67e74705SXin Li // CHECK-LE: and <2 x i64> %{{[0-9]+}},
879*67e74705SXin Li // CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
880*67e74705SXin Li // CHECK-LE: or <2 x i64>
881*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
882*67e74705SXin Li 
883*67e74705SXin Li   dummy();
884*67e74705SXin Li // CHECK: call void @dummy()
885*67e74705SXin Li // CHECK-LE: call void @dummy()
886*67e74705SXin Li 
887*67e74705SXin Li   res_vd = vec_sel(vd, vd, vull);
888*67e74705SXin Li // CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
889*67e74705SXin Li // CHECK: and <2 x i64> %{{[0-9]+}},
890*67e74705SXin Li // CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
891*67e74705SXin Li // CHECK: or <2 x i64>
892*67e74705SXin Li // CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
893*67e74705SXin Li // CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
894*67e74705SXin Li // CHECK-LE: and <2 x i64> %{{[0-9]+}},
895*67e74705SXin Li // CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
896*67e74705SXin Li // CHECK-LE: or <2 x i64>
897*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
898*67e74705SXin Li 
899*67e74705SXin Li   res_vf = vec_sqrt(vf);
900*67e74705SXin Li // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
901*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
902*67e74705SXin Li 
903*67e74705SXin Li   res_vd = vec_sqrt(vd);
904*67e74705SXin Li // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
905*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
906*67e74705SXin Li 
907*67e74705SXin Li   res_vd = vec_sub(vd, vd);
908*67e74705SXin Li // CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
909*67e74705SXin Li // CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
910*67e74705SXin Li 
911*67e74705SXin Li   res_vf = vec_trunc(vf);
912*67e74705SXin Li // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
913*67e74705SXin Li // CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
914*67e74705SXin Li 
915*67e74705SXin Li   res_vd = vec_trunc(vd);
916*67e74705SXin Li // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
917*67e74705SXin Li // CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
918*67e74705SXin Li 
919*67e74705SXin Li   /* vec_vor */
920*67e74705SXin Li   res_vsll = vec_vor(vsll, vsll);
921*67e74705SXin Li // CHECK: or <2 x i64>
922*67e74705SXin Li // CHECK-LE: or <2 x i64>
923*67e74705SXin Li 
924*67e74705SXin Li   res_vsll = vec_vor(vbll, vsll);
925*67e74705SXin Li // CHECK: or <2 x i64>
926*67e74705SXin Li // CHECK-LE: or <2 x i64>
927*67e74705SXin Li 
928*67e74705SXin Li   res_vsll = vec_vor(vsll, vbll);
929*67e74705SXin Li // CHECK: or <2 x i64>
930*67e74705SXin Li // CHECK-LE: or <2 x i64>
931*67e74705SXin Li 
932*67e74705SXin Li   res_vull = vec_vor(vull, vull);
933*67e74705SXin Li // CHECK: or <2 x i64>
934*67e74705SXin Li // CHECK-LE: or <2 x i64>
935*67e74705SXin Li 
936*67e74705SXin Li   res_vull = vec_vor(vbll, vull);
937*67e74705SXin Li // CHECK: or <2 x i64>
938*67e74705SXin Li // CHECK-LE: or <2 x i64>
939*67e74705SXin Li 
940*67e74705SXin Li   res_vull = vec_vor(vull, vbll);
941*67e74705SXin Li // CHECK: or <2 x i64>
942*67e74705SXin Li // CHECK-LE: or <2 x i64>
943*67e74705SXin Li 
944*67e74705SXin Li   res_vbll = vec_vor(vbll, vbll);
945*67e74705SXin Li // CHECK: or <2 x i64>
946*67e74705SXin Li // CHECK-LE: or <2 x i64>
947*67e74705SXin Li 
948*67e74705SXin Li   /* vec_xor */
949*67e74705SXin Li   res_vsll = vec_xor(vsll, vsll);
950*67e74705SXin Li // CHECK: xor <2 x i64>
951*67e74705SXin Li // CHECK-LE: xor <2 x i64>
952*67e74705SXin Li 
953*67e74705SXin Li   res_vsll = vec_xor(vbll, vsll);
954*67e74705SXin Li // CHECK: xor <2 x i64>
955*67e74705SXin Li // CHECK-LE: xor <2 x i64>
956*67e74705SXin Li 
957*67e74705SXin Li   res_vsll = vec_xor(vsll, vbll);
958*67e74705SXin Li // CHECK: xor <2 x i64>
959*67e74705SXin Li // CHECK-LE: xor <2 x i64>
960*67e74705SXin Li 
961*67e74705SXin Li   res_vull = vec_xor(vull, vull);
962*67e74705SXin Li // CHECK: xor <2 x i64>
963*67e74705SXin Li // CHECK-LE: xor <2 x i64>
964*67e74705SXin Li 
965*67e74705SXin Li   res_vull = vec_xor(vbll, vull);
966*67e74705SXin Li // CHECK: xor <2 x i64>
967*67e74705SXin Li // CHECK-LE: xor <2 x i64>
968*67e74705SXin Li 
969*67e74705SXin Li   res_vull = vec_xor(vull, vbll);
970*67e74705SXin Li // CHECK: xor <2 x i64>
971*67e74705SXin Li // CHECK-LE: xor <2 x i64>
972*67e74705SXin Li 
973*67e74705SXin Li   res_vbll = vec_xor(vbll, vbll);
974*67e74705SXin Li // CHECK: xor <2 x i64>
975*67e74705SXin Li // CHECK-LE: xor <2 x i64>
976*67e74705SXin Li 
977*67e74705SXin Li   dummy();
978*67e74705SXin Li // CHECK: call void @dummy()
979*67e74705SXin Li // CHECK-LE: call void @dummy()
980*67e74705SXin Li 
981*67e74705SXin Li   res_vd = vec_xor(vd, vd);
982*67e74705SXin Li // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
983*67e74705SXin Li // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
984*67e74705SXin Li // CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
985*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
986*67e74705SXin Li 
987*67e74705SXin Li   dummy();
988*67e74705SXin Li // CHECK: call void @dummy()
989*67e74705SXin Li // CHECK-LE: call void @dummy()
990*67e74705SXin Li 
991*67e74705SXin Li   res_vd = vec_xor(vd, vbll);
992*67e74705SXin Li // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
993*67e74705SXin Li // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
994*67e74705SXin Li // CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
995*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
996*67e74705SXin Li 
997*67e74705SXin Li   dummy();
998*67e74705SXin Li // CHECK: call void @dummy()
999*67e74705SXin Li // CHECK-LE: call void @dummy()
1000*67e74705SXin Li 
1001*67e74705SXin Li   res_vd = vec_xor(vbll, vd);
1002*67e74705SXin Li // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
1003*67e74705SXin Li // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
1004*67e74705SXin Li // CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
1005*67e74705SXin Li // CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
1006*67e74705SXin Li 
1007*67e74705SXin Li   /* vec_vxor */
1008*67e74705SXin Li   res_vsll = vec_vxor(vsll, vsll);
1009*67e74705SXin Li // CHECK: xor <2 x i64>
1010*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1011*67e74705SXin Li 
1012*67e74705SXin Li   res_vsll = vec_vxor(vbll, vsll);
1013*67e74705SXin Li // CHECK: xor <2 x i64>
1014*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1015*67e74705SXin Li 
1016*67e74705SXin Li   res_vsll = vec_vxor(vsll, vbll);
1017*67e74705SXin Li // CHECK: xor <2 x i64>
1018*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1019*67e74705SXin Li 
1020*67e74705SXin Li   res_vull = vec_vxor(vull, vull);
1021*67e74705SXin Li // CHECK: xor <2 x i64>
1022*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1023*67e74705SXin Li 
1024*67e74705SXin Li   res_vull = vec_vxor(vbll, vull);
1025*67e74705SXin Li // CHECK: xor <2 x i64>
1026*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1027*67e74705SXin Li 
1028*67e74705SXin Li   res_vull = vec_vxor(vull, vbll);
1029*67e74705SXin Li // CHECK: xor <2 x i64>
1030*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1031*67e74705SXin Li 
1032*67e74705SXin Li   res_vbll = vec_vxor(vbll, vbll);
1033*67e74705SXin Li // CHECK: xor <2 x i64>
1034*67e74705SXin Li // CHECK-LE: xor <2 x i64>
1035*67e74705SXin Li 
1036*67e74705SXin Li   res_vsll = vec_cts(vd, 0);
1037*67e74705SXin Li // CHECK: fmul <2 x double>
1038*67e74705SXin Li // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
1039*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1040*67e74705SXin Li // CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>
1041*67e74705SXin Li 
1042*67e74705SXin Li   res_vsll = vec_cts(vd, 31);
1043*67e74705SXin Li // CHECK: fmul <2 x double>
1044*67e74705SXin Li // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
1045*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1046*67e74705SXin Li // CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>
1047*67e74705SXin Li 
1048*67e74705SXin Li   res_vsll = vec_ctu(vd, 0);
1049*67e74705SXin Li // CHECK: fmul <2 x double>
1050*67e74705SXin Li // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
1051*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1052*67e74705SXin Li // CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>
1053*67e74705SXin Li 
1054*67e74705SXin Li   res_vsll = vec_ctu(vd, 31);
1055*67e74705SXin Li // CHECK: fmul <2 x double>
1056*67e74705SXin Li // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
1057*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1058*67e74705SXin Li // CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>
1059*67e74705SXin Li 
1060*67e74705SXin Li   res_vd = vec_ctf(vsll, 0);
1061*67e74705SXin Li // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
1062*67e74705SXin Li // CHECK: fmul <2 x double>
1063*67e74705SXin Li // CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
1064*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1065*67e74705SXin Li 
1066*67e74705SXin Li   res_vd = vec_ctf(vsll, 31);
1067*67e74705SXin Li // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
1068*67e74705SXin Li // CHECK: fmul <2 x double>
1069*67e74705SXin Li // CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
1070*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1071*67e74705SXin Li 
1072*67e74705SXin Li   res_vd = vec_ctf(vull, 0);
1073*67e74705SXin Li // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
1074*67e74705SXin Li // CHECK: fmul <2 x double>
1075*67e74705SXin Li // CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
1076*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1077*67e74705SXin Li 
1078*67e74705SXin Li   res_vd = vec_ctf(vull, 31);
1079*67e74705SXin Li // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
1080*67e74705SXin Li // CHECK: fmul <2 x double>
1081*67e74705SXin Li // CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
1082*67e74705SXin Li // CHECK-LE: fmul <2 x double>
1083*67e74705SXin Li }
1084