; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s

; Verify that the VSX load/store builtins (lxvw4x/stxvw4x and lxvd2x/stxvd2x)
; are lowered to the doubleword forms lxvd2x/stxvd2x on little-endian targets.

@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@vsll = global <2 x i64> <i64 255, i64 -937>, align 16
@vull = global <2 x i64> <i64 1447, i64 2894>, align 16
@res_vsi = common global <4 x i32> zeroinitializer, align 16
@res_vui = common global <4 x i32> zeroinitializer, align 16
@res_vf = common global <4 x float> zeroinitializer, align 16
@res_vsll = common global <2 x i64> zeroinitializer, align 16
@res_vull = common global <2 x i64> zeroinitializer, align 16
@res_vd = common global <2 x double> zeroinitializer, align 16

define void @test1() {
entry:
; CHECK-LABEL: test1
; CHECK: lxvd2x
  %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
; CHECK: stxvd2x
  store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
; CHECK: lxvd2x
  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
; CHECK: stxvd2x
  store <4 x i32> %1, <4 x i32>* @res_vui, align 16
; CHECK: lxvd2x
  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
  %3 = bitcast <4 x i32> %2 to <4 x float>
; CHECK: stxvd2x
  store <4 x float> %3, <4 x float>* @res_vf, align 16
; CHECK: lxvd2x
  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
  %5 = bitcast <2 x double> %4 to <2 x i64>
; CHECK: stxvd2x
  store <2 x i64> %5, <2 x i64>* @res_vsll, align 16
; CHECK: lxvd2x
  %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
  %7 = bitcast <2 x double> %6 to <2 x i64>
; CHECK: stxvd2x
  store <2 x i64> %7, <2 x i64>* @res_vull, align 16
; CHECK: lxvd2x
  %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
; CHECK: stxvd2x
  store <2 x double> %8, <2 x double>* @res_vd, align 16
; CHECK: lxvd2x
  %9 = load <4 x i32>, <4 x i32>* @vsi, align 16
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
; CHECK: lxvd2x
  %10 = load <4 x i32>, <4 x i32>* @vui, align 16
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
; CHECK: lxvd2x
  %11 = load <4 x float>, <4 x float>* @vf, align 16
  %12 = bitcast <4 x float> %11 to <4 x i32>
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
; CHECK: lxvd2x
  %13 = load <2 x i64>, <2 x i64>* @vsll, align 16
  %14 = bitcast <2 x i64> %13 to <2 x double>
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, i8* bitcast (<2 x i64>* @res_vsll to i8*))
; CHECK: lxvd2x
  %15 = load <2 x i64>, <2 x i64>* @vull, align 16
  %16 = bitcast <2 x i64> %15 to <2 x double>
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %16, i8* bitcast (<2 x i64>* @res_vull to i8*))
; CHECK: lxvd2x
  %17 = load <2 x double>, <2 x double>* @vd, align 16
; CHECK: stxvd2x
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %17, i8* bitcast (<2 x double>* @res_vd to i8*))
  ret void
}

declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)