; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

; Tests for the AArch64 load/store optimizer: adjacent stores to consecutive
; addresses should be merged into a single stp (store pair) instruction.

; CHECK-LABEL: stp_int
; CHECK: stp w0, w1, [x2]
define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_long
; CHECK: stp x0, x1, [x2]
define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
  store i64 %a, i64* %p, align 8
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
  store i64 %b, i64* %add.ptr, align 8
  ret void
}

; CHECK-LABEL: stp_float
; CHECK: stp s0, s1, [x0]
define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
  store float %a, float* %p, align 4
  %add.ptr = getelementptr inbounds float, float* %p, i64 1
  store float %b, float* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_double
; CHECK: stp d0, d1, [x0]
define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
  store double %a, double* %p, align 8
  %add.ptr = getelementptr inbounds double, double* %p, i64 1
  store double %b, double* %add.ptr, align 8
  ret void
}

; Test the load/store optimizer---combine sturs (unscaled, negative-offset
; stores) into a stp, if appropriate.
define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
; CHECK-LABEL: stur_int
; CHECK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i32, i32* %p, i32 -1
  store i32 %a, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %p, i32 -2
  store i32 %b, i32* %p2, align 2
  ret void
}

define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
; CHECK-LABEL: stur_long
; CHECK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i64, i64* %p, i32 -1
  store i64 %a, i64* %p1, align 2
  %p2 = getelementptr inbounds i64, i64* %p, i32 -2
  store i64 %b, i64* %p2, align 2
  ret void
}

define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
; CHECK-LABEL: stur_float
; CHECK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds float, float* %p, i32 -1
  store float %a, float* %p1, align 2
  %p2 = getelementptr inbounds float, float* %p, i32 -2
  store float %b, float* %p2, align 2
  ret void
}

define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
; CHECK-LABEL: stur_double
; CHECK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds double, double* %p, i32 -1
  store double %a, double* %p1, align 2
  %p2 = getelementptr inbounds double, double* %p, i32 -2
  store double %b, double* %p2, align 2
  ret void
}

; A splatted <4 x i32> store built from scalar inserts should still become
; two stp instructions covering the 16 bytes.
define void @splat_v4i32(i32 %v, i32 *%p) {
entry:

; CHECK-LABEL: splat_v4i32
; CHECK-DAG: stp w0, w0, [x1]
; CHECK-DAG: stp w0, w0, [x1, #8]
; CHECK: ret

  %p17 = insertelement <4 x i32> undef, i32 %v, i32 0
  %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp
; CHECK-LABEL: stp_int_rar_hazard
; CHECK: ldr [[REG:w[0-9]+]], [x2, #8]
; CHECK: add w8, [[REG]], w1
; CHECK: stp w0, w1, [x2]
; CHECK: ret
define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp
; CHECK-LABEL: stp_int_rar_hazard_after
; CHECK: ldr [[REG:w[0-9]+]], [x3, #4]
; CHECK: add w0, [[REG]], w2
; CHECK: stp w1, w2, [x3]
; CHECK: ret
define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}